1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
17 //
18 // This pass has three parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
28 // There is a development effort going on to migrate loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SmallPtrSet.h"
73 #include "llvm/ADT/SmallSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
91 #include "llvm/Analysis/ProfileSummaryInfo.h"
92 #include "llvm/Analysis/ScalarEvolution.h"
93 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
94 #include "llvm/Analysis/TargetLibraryInfo.h"
95 #include "llvm/Analysis/TargetTransformInfo.h"
96 #include "llvm/Analysis/VectorUtils.h"
97 #include "llvm/IR/Attributes.h"
98 #include "llvm/IR/BasicBlock.h"
99 #include "llvm/IR/CFG.h"
100 #include "llvm/IR/Constant.h"
101 #include "llvm/IR/Constants.h"
102 #include "llvm/IR/DataLayout.h"
103 #include "llvm/IR/DebugInfoMetadata.h"
104 #include "llvm/IR/DebugLoc.h"
105 #include "llvm/IR/DerivedTypes.h"
106 #include "llvm/IR/DiagnosticInfo.h"
107 #include "llvm/IR/Dominators.h"
108 #include "llvm/IR/Function.h"
109 #include "llvm/IR/IRBuilder.h"
110 #include "llvm/IR/InstrTypes.h"
111 #include "llvm/IR/Instruction.h"
112 #include "llvm/IR/Instructions.h"
113 #include "llvm/IR/IntrinsicInst.h"
114 #include "llvm/IR/Intrinsics.h"
115 #include "llvm/IR/Metadata.h"
116 #include "llvm/IR/Module.h"
117 #include "llvm/IR/Operator.h"
118 #include "llvm/IR/PatternMatch.h"
119 #include "llvm/IR/Type.h"
120 #include "llvm/IR/Use.h"
121 #include "llvm/IR/User.h"
122 #include "llvm/IR/Value.h"
123 #include "llvm/IR/ValueHandle.h"
124 #include "llvm/IR/Verifier.h"
125 #include "llvm/InitializePasses.h"
126 #include "llvm/Pass.h"
127 #include "llvm/Support/Casting.h"
128 #include "llvm/Support/CommandLine.h"
129 #include "llvm/Support/Compiler.h"
130 #include "llvm/Support/Debug.h"
131 #include "llvm/Support/ErrorHandling.h"
132 #include "llvm/Support/InstructionCost.h"
133 #include "llvm/Support/MathExtras.h"
134 #include "llvm/Support/raw_ostream.h"
135 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
136 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
137 #include "llvm/Transforms/Utils/LoopSimplify.h"
138 #include "llvm/Transforms/Utils/LoopUtils.h"
139 #include "llvm/Transforms/Utils/LoopVersioning.h"
140 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
141 #include "llvm/Transforms/Utils/SizeOpts.h"
142 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
143 #include <algorithm>
144 #include <cassert>
145 #include <cstdint>
146 #include <functional>
147 #include <iterator>
148 #include <limits>
149 #include <map>
150 #include <memory>
151 #include <string>
152 #include <tuple>
153 #include <utility>
154 
155 using namespace llvm;
156 
157 #define LV_NAME "loop-vectorize"
158 #define DEBUG_TYPE LV_NAME
159 
160 #ifndef NDEBUG
161 const char VerboseDebug[] = DEBUG_TYPE "-verbose";
162 #endif
163 
164 /// @{
165 /// Metadata attribute names
166 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
167 const char LLVMLoopVectorizeFollowupVectorized[] =
168     "llvm.loop.vectorize.followup_vectorized";
169 const char LLVMLoopVectorizeFollowupEpilogue[] =
170     "llvm.loop.vectorize.followup_epilogue";
171 /// @}
172 
173 STATISTIC(LoopsVectorized, "Number of loops vectorized");
174 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
175 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
176 
177 static cl::opt<bool> EnableEpilogueVectorization(
178     "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
179     cl::desc("Enable vectorization of epilogue loops."));
180 
181 static cl::opt<unsigned> EpilogueVectorizationForceVF(
182     "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
183     cl::desc("When epilogue vectorization is enabled, and a value greater than "
184              "1 is specified, forces the given VF for all applicable epilogue "
185              "loops."));
186 
187 static cl::opt<unsigned> EpilogueVectorizationMinVF(
188     "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
189     cl::desc("Only loops with vectorization factor equal to or larger than "
190              "the specified value are considered for epilogue vectorization."));
191 
192 /// Loops with a known constant trip count below this number are vectorized only
193 /// if no scalar iteration overheads are incurred.
194 static cl::opt<unsigned> TinyTripCountVectorThreshold(
195     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
196     cl::desc("Loops with a constant trip count that is smaller than this "
197              "value are vectorized only if no scalar iteration overheads "
198              "are incurred."));
199 
200 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
201     "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
202     cl::desc("The maximum allowed number of runtime memory checks with a "
203              "vectorize(enable) pragma."));
204 
205 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
206 // that predication is preferred, and this lists all options. I.e., the
207 // vectorizer will try to fold the tail-loop (epilogue) into the vector body
208 // and predicate the instructions accordingly. If tail-folding fails, there are
209 // different fallback strategies depending on these values:
210 namespace PreferPredicateTy {
211   enum Option {
212     ScalarEpilogue = 0,
213     PredicateElseScalarEpilogue,
214     PredicateOrDontVectorize
215   };
216 } // namespace PreferPredicateTy
217 
218 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
219     "prefer-predicate-over-epilogue",
220     cl::init(PreferPredicateTy::ScalarEpilogue),
221     cl::Hidden,
222     cl::desc("Tail-folding and predication preferences over creating a scalar "
223              "epilogue loop."),
224     cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
225                          "scalar-epilogue",
226                          "Don't tail-predicate loops, create scalar epilogue"),
227               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
228                          "predicate-else-scalar-epilogue",
229                          "prefer tail-folding, create scalar epilogue if tail "
230                          "folding fails."),
231               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
232                          "predicate-dont-vectorize",
233                          "prefers tail-folding, don't attempt vectorization if "
234                          "tail-folding fails.")));
235 
236 static cl::opt<bool> MaximizeBandwidth(
237     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
238     cl::desc("Maximize bandwidth when selecting vectorization factor which "
239              "will be determined by the smallest type in loop."));
240 
241 static cl::opt<bool> EnableInterleavedMemAccesses(
242     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
243     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
244 
245 /// An interleave-group may need masking if it resides in a block that needs
246 /// predication, or in order to mask away gaps.
247 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
248     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
249     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
250 
251 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
252     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
253     cl::desc("We don't interleave loops with a estimated constant trip count "
254              "below this number"));
255 
256 static cl::opt<unsigned> ForceTargetNumScalarRegs(
257     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
258     cl::desc("A flag that overrides the target's number of scalar registers."));
259 
260 static cl::opt<unsigned> ForceTargetNumVectorRegs(
261     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
262     cl::desc("A flag that overrides the target's number of vector registers."));
263 
264 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
265     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
266     cl::desc("A flag that overrides the target's max interleave factor for "
267              "scalar loops."));
268 
269 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
270     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
271     cl::desc("A flag that overrides the target's max interleave factor for "
272              "vectorized loops."));
273 
274 static cl::opt<unsigned> ForceTargetInstructionCost(
275     "force-target-instruction-cost", cl::init(0), cl::Hidden,
276     cl::desc("A flag that overrides the target's expected cost for "
277              "an instruction to a single constant value. Mostly "
278              "useful for getting consistent testing."));
279 
280 static cl::opt<bool> ForceTargetSupportsScalableVectors(
281     "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
282     cl::desc(
283         "Pretend that scalable vectors are supported, even if the target does "
284         "not support them. This flag should only be used for testing."));
285 
286 static cl::opt<unsigned> SmallLoopCost(
287     "small-loop-cost", cl::init(20), cl::Hidden,
288     cl::desc(
289         "The cost of a loop that is considered 'small' by the interleaver."));
290 
291 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
292     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
293     cl::desc("Enable the use of the block frequency analysis to access PGO "
294              "heuristics minimizing code growth in cold regions and being more "
295              "aggressive in hot regions."));
296 
297 // Runtime interleave loops for load/store throughput.
298 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
299     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
300     cl::desc(
301         "Enable runtime interleaving until load/store ports are saturated"));
302 
303 /// Interleave small loops with scalar reductions.
304 static cl::opt<bool> InterleaveSmallLoopScalarReduction(
305     "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
306     cl::desc("Enable interleaving for loops with small iteration counts that "
307              "contain scalar reductions to expose ILP."));
308 
309 /// The number of stores in a loop that are allowed to need predication.
310 static cl::opt<unsigned> NumberOfStoresToPredicate(
311     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
312     cl::desc("Max number of stores to be predicated behind an if."));
313 
314 static cl::opt<bool> EnableIndVarRegisterHeur(
315     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
316     cl::desc("Count the induction variable only once when interleaving"));
317 
318 static cl::opt<bool> EnableCondStoresVectorization(
319     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
320     cl::desc("Enable if predication of stores during vectorization."));
321 
322 static cl::opt<unsigned> MaxNestedScalarReductionIC(
323     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
324     cl::desc("The maximum interleave count to use when interleaving a scalar "
325              "reduction in a nested loop."));
326 
327 static cl::opt<bool>
328     PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
329                            cl::Hidden,
330                            cl::desc("Prefer in-loop vector reductions, "
331                                     "overriding the targets preference."));
332 
333 static cl::opt<bool> ForceOrderedReductions(
334     "force-ordered-reductions", cl::init(false), cl::Hidden,
335     cl::desc("Enable the vectorisation of loops with in-order (strict) "
336              "FP reductions"));
337 
338 static cl::opt<bool> PreferPredicatedReductionSelect(
339     "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
340     cl::desc(
341         "Prefer predicating a reduction operation over an after loop select."));
342 
343 cl::opt<bool> EnableVPlanNativePath(
344     "enable-vplan-native-path", cl::init(false), cl::Hidden,
345     cl::desc("Enable VPlan-native vectorization path with "
346              "support for outer loop vectorization."));
347 
348 // FIXME: Remove this switch once we have divergence analysis. Currently we
349 // assume divergent non-backedge branches when this switch is true.
350 cl::opt<bool> EnableVPlanPredication(
351     "enable-vplan-predication", cl::init(false), cl::Hidden,
352     cl::desc("Enable VPlan-native vectorization path predicator with "
353              "support for outer loop vectorization."));
354 
355 // This flag enables the stress testing of the VPlan H-CFG construction in the
356 // VPlan-native vectorization path. It must be used in conjuction with
357 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
358 // verification of the H-CFGs built.
359 static cl::opt<bool> VPlanBuildStressTest(
360     "vplan-build-stress-test", cl::init(false), cl::Hidden,
361     cl::desc(
362         "Build VPlan for every supported loop nest in the function and bail "
363         "out right after the build (stress test the VPlan H-CFG construction "
364         "in the VPlan-native vectorization path)."));
365 
366 cl::opt<bool> llvm::EnableLoopInterleaving(
367     "interleave-loops", cl::init(true), cl::Hidden,
368     cl::desc("Enable loop interleaving in Loop vectorization passes"));
369 cl::opt<bool> llvm::EnableLoopVectorization(
370     "vectorize-loops", cl::init(true), cl::Hidden,
371     cl::desc("Run the Loop vectorization passes"));
372 
373 cl::opt<bool> PrintVPlansInDotFormat(
374     "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
375     cl::desc("Use dot format instead of plain text when dumping VPlans"));
376 
377 /// A helper function that returns true if the given type is irregular. The
378 /// type is irregular if its allocated size doesn't equal the store size of an
379 /// element of the corresponding vector type.
380 static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
381   // Determine if an array of N elements of type Ty is "bitcast compatible"
382   // with a <N x Ty> vector.
383   // This is only true if there is no padding between the array elements.
384   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
385 }
386 
387 /// A helper function that returns the reciprocal of the block probability of
388 /// predicated blocks. If we return X, we are assuming the predicated block
389 /// will execute once for every X iterations of the loop header.
390 ///
391 /// TODO: We should use actual block probability here, if available. Currently,
392 ///       we always assume predicated blocks have a 50% chance of executing.
393 static unsigned getReciprocalPredBlockProb() { return 2; }
394 
395 /// A helper function that returns an integer or floating-point constant with
396 /// value C.
397 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
398   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
399                            : ConstantFP::get(Ty, C);
400 }
401 
402 /// Returns "best known" trip count for the specified loop \p L as defined by
403 /// the following procedure:
404 ///   1) Returns exact trip count if it is known.
405 ///   2) Returns expected trip count according to profile data if any.
406 ///   3) Returns upper bound estimate if it is known.
407 ///   4) Returns None if all of the above failed.
408 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
409   // Check if exact trip count is known.
410   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
411     return ExpectedTC;
412 
413   // Check if there is an expected trip count available from profile data.
414   if (LoopVectorizeWithBlockFrequency)
415     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
416       return EstimatedTC;
417 
418   // Check if upper bound estimate is known.
419   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
420     return ExpectedTC;
421 
422   return None;
423 }
424 
425 // Forward declare GeneratedRTChecks.
426 class GeneratedRTChecks;
427 
428 namespace llvm {
429 
430 AnalysisKey ShouldRunExtraVectorPasses::Key;
431 
432 /// InnerLoopVectorizer vectorizes loops which contain only one basic
433 /// block to a specified vectorization factor (VF).
434 /// This class performs the widening of scalars into vectors, or multiple
435 /// scalars. This class also implements the following features:
436 /// * It inserts an epilogue loop for handling loops that don't have iteration
437 ///   counts that are known to be a multiple of the vectorization factor.
438 /// * It handles the code generation for reduction variables.
439 /// * Scalarization (implementation using scalars) of un-vectorizable
440 ///   instructions.
441 /// InnerLoopVectorizer does not perform any vectorization-legality
442 /// checks, and relies on the caller to check for the different legality
443 /// aspects. The InnerLoopVectorizer relies on the
444 /// LoopVectorizationLegality class to provide information about the induction
445 /// and reduction variables that were found to a given vectorization factor.
446 class InnerLoopVectorizer {
447 public:
448   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
449                       LoopInfo *LI, DominatorTree *DT,
450                       const TargetLibraryInfo *TLI,
451                       const TargetTransformInfo *TTI, AssumptionCache *AC,
452                       OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
453                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
454                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
455                       ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
456       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
457         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
458         Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
459         PSI(PSI), RTChecks(RTChecks) {
460     // Query this against the original loop and save it here because the profile
461     // of the original loop header may change as the transformation happens.
462     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
463         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
464   }
465 
466   virtual ~InnerLoopVectorizer() = default;
467 
468   /// Create a new empty loop that will contain vectorized instructions later
469   /// on, while the old loop will be used as the scalar remainder. Control flow
470   /// is generated around the vectorized (and scalar epilogue) loops consisting
471   /// of various checks and bypasses. Return the pre-header block of the new
472   /// loop and the start value for the canonical induction, if it is != 0. The
473   /// latter is the case when vectorizing the epilogue loop. In the case of
474   /// epilogue vectorization, this function is overriden to handle the more
475   /// complex control flow around the loops.
476   virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();
477 
478   /// Widen a single call instruction within the innermost loop.
479   void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
480                             VPTransformState &State);
481 
482   /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
483   void fixVectorizedLoop(VPTransformState &State);
484 
485   // Return true if any runtime check is added.
486   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
487 
488   /// A type for vectorized values in the new loop. Each value from the
489   /// original loop, when vectorized, is represented by UF vector values in the
490   /// new unrolled loop, where UF is the unroll factor.
491   using VectorParts = SmallVector<Value *, 2>;
492 
493   /// Vectorize a single vector PHINode in a block in the VPlan-native path
494   /// only.
495   void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
496                            VPTransformState &State);
497 
498   /// A helper function to scalarize a single Instruction in the innermost loop.
499   /// Generates a sequence of scalar instances for each lane between \p MinLane
500   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
501   /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
502   /// Instr's operands.
503   void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
504                             const VPIteration &Instance, bool IfPredicateInstr,
505                             VPTransformState &State);
506 
507   /// Construct the vector value of a scalarized value \p V one lane at a time.
508   void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
509                                  VPTransformState &State);
510 
511   /// Try to vectorize interleaved access group \p Group with the base address
512   /// given in \p Addr, optionally masking the vector operations if \p
513   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
514   /// values in the vectorized loop.
515   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
516                                 ArrayRef<VPValue *> VPDefs,
517                                 VPTransformState &State, VPValue *Addr,
518                                 ArrayRef<VPValue *> StoredValues,
519                                 VPValue *BlockInMask = nullptr);
520 
521   /// Set the debug location in the builder \p Ptr using the debug location in
522   /// \p V. If \p Ptr is None then it uses the class member's Builder.
523   void setDebugLocFromInst(const Value *V,
524                            Optional<IRBuilderBase *> CustomBuilder = None);
525 
526   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
527   void fixNonInductionPHIs(VPTransformState &State);
528 
529   /// Returns true if the reordering of FP operations is not allowed, but we are
530   /// able to vectorize with strict in-order reductions for the given RdxDesc.
531   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);
532 
533   /// Create a broadcast instruction. This method generates a broadcast
534   /// instruction (shuffle) for loop invariant values and for the induction
535   /// value. If this is the induction variable then we extend it to N, N+1, ...
536   /// this is needed because each iteration in the loop corresponds to a SIMD
537   /// element.
538   virtual Value *getBroadcastInstrs(Value *V);
539 
540   /// Add metadata from one instruction to another.
541   ///
542   /// This includes both the original MDs from \p From and additional ones (\see
543   /// addNewMetadata).  Use this for *newly created* instructions in the vector
544   /// loop.
545   void addMetadata(Instruction *To, Instruction *From);
546 
547   /// Similar to the previous function but it adds the metadata to a
548   /// vector of instructions.
549   void addMetadata(ArrayRef<Value *> To, Instruction *From);
550 
551   // Returns the resume value (bc.merge.rdx) for a reduction as
552   // generated by fixReduction.
553   PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);
554 
555 protected:
556   friend class LoopVectorizationPlanner;
557 
558   /// A small list of PHINodes.
559   using PhiVector = SmallVector<PHINode *, 4>;
560 
561   /// A type for scalarized values in the new loop. Each value from the
562   /// original loop, when scalarized, is represented by UF x VF scalar values
563   /// in the new unrolled loop, where UF is the unroll factor and VF is the
564   /// vectorization factor.
565   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
566 
567   /// Set up the values of the IVs correctly when exiting the vector loop.
568   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
569                     Value *CountRoundDown, Value *EndValue,
570                     BasicBlock *MiddleBlock, BasicBlock *VectorHeader);
571 
572   /// Handle all cross-iteration phis in the header.
573   void fixCrossIterationPHIs(VPTransformState &State);
574 
575   /// Create the exit value of first order recurrences in the middle block and
576   /// update their users.
577   void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
578                                VPTransformState &State);
579 
580   /// Create code for the loop exit value of the reduction.
581   void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);
582 
583   /// Clear NSW/NUW flags from reduction instructions if necessary.
584   void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
585                                VPTransformState &State);
586 
587   /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
588   /// means we need to add the appropriate incoming value from the middle
589   /// block as exiting edges from the scalar epilogue loop (if present) are
590   /// already in place, and we exit the vector loop exclusively to the middle
591   /// block.
592   void fixLCSSAPHIs(VPTransformState &State);
593 
594   /// Iteratively sink the scalarized operands of a predicated instruction into
595   /// the block that was created for it.
596   void sinkScalarOperands(Instruction *PredInst);
597 
598   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
599   /// represented as.
600   void truncateToMinimalBitwidths(VPTransformState &State);
601 
602   /// Returns (and creates if needed) the original loop trip count.
603   Value *getOrCreateTripCount(BasicBlock *InsertBlock);
604 
605   /// Returns (and creates if needed) the trip count of the widened loop.
606   Value *getOrCreateVectorTripCount(BasicBlock *InsertBlock);
607 
608   /// Returns a bitcasted value to the requested vector type.
609   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
610   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
611                                 const DataLayout &DL);
612 
613   /// Emit a bypass check to see if the vector trip count is zero, including if
614   /// it overflows.
615   void emitMinimumIterationCountCheck(BasicBlock *Bypass);
616 
617   /// Emit a bypass check to see if all of the SCEV assumptions we've
618   /// had to make are correct. Returns the block containing the checks or
619   /// nullptr if no checks have been added.
620   BasicBlock *emitSCEVChecks(BasicBlock *Bypass);
621 
622   /// Emit bypass checks to check any memory assumptions we may have made.
623   /// Returns the block containing the checks or nullptr if no checks have been
624   /// added.
625   BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass);
626 
627   /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
628   /// vector loop preheader, middle block and scalar preheader.
629   void createVectorLoopSkeleton(StringRef Prefix);
630 
631   /// Create new phi nodes for the induction variables to resume iteration count
632   /// in the scalar epilogue, from where the vectorized loop left off.
633   /// In cases where the loop skeleton is more complicated (eg. epilogue
634   /// vectorization) and the resume values can come from an additional bypass
635   /// block, the \p AdditionalBypass pair provides information about the bypass
636   /// block and the end value on the edge from bypass to this loop.
637   void createInductionResumeValues(
638       std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
639 
640   /// Complete the loop skeleton by adding debug MDs, creating appropriate
641   /// conditional branches in the middle block, preparing the builder and
642   /// running the verifier. Return the preheader of the completed vector loop.
643   BasicBlock *completeLoopSkeleton(MDNode *OrigLoopID);
644 
645   /// Add additional metadata to \p To that was not present on \p Orig.
646   ///
647   /// Currently this is used to add the noalias annotations based on the
648   /// inserted memchecks.  Use this for instructions that are *cloned* into the
649   /// vector loop.
650   void addNewMetadata(Instruction *To, const Instruction *Orig);
651 
652   /// Collect poison-generating recipes that may generate a poison value that is
653   /// used after vectorization, even when their operands are not poison. Those
654   /// recipes meet the following conditions:
655   ///  * Contribute to the address computation of a recipe generating a widen
656   ///    memory load/store (VPWidenMemoryInstructionRecipe or
657   ///    VPInterleaveRecipe).
658   ///  * Such a widen memory load/store has at least one underlying Instruction
659   ///    that is in a basic block that needs predication and after vectorization
660   ///    the generated instruction won't be predicated.
661   void collectPoisonGeneratingRecipes(VPTransformState &State);
662 
663   /// Allow subclasses to override and print debug traces before/after vplan
664   /// execution, when trace information is requested.
665   virtual void printDebugTracesAtStart(){};
666   virtual void printDebugTracesAtEnd(){};
667 
668   /// The original loop.
669   Loop *OrigLoop;
670 
671   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
672   /// dynamic knowledge to simplify SCEV expressions and converts them to a
673   /// more usable form.
674   PredicatedScalarEvolution &PSE;
675 
676   /// Loop Info.
677   LoopInfo *LI;
678 
679   /// Dominator Tree.
680   DominatorTree *DT;
681 
682   /// Alias Analysis.
683   AAResults *AA;
684 
685   /// Target Library Info.
686   const TargetLibraryInfo *TLI;
687 
688   /// Target Transform Info.
689   const TargetTransformInfo *TTI;
690 
691   /// Assumption Cache.
692   AssumptionCache *AC;
693 
694   /// Interface to emit optimization remarks.
695   OptimizationRemarkEmitter *ORE;
696 
697   /// LoopVersioning.  It's only set up (non-null) if memchecks were
698   /// used.
699   ///
700   /// This is currently only used to add no-alias metadata based on the
701   /// memchecks.  The actually versioning is performed manually.
702   std::unique_ptr<LoopVersioning> LVer;
703 
704   /// The vectorization SIMD factor to use. Each vector will have this many
705   /// vector elements.
706   ElementCount VF;
707 
708   /// The vectorization unroll factor to use. Each scalar is vectorized to this
709   /// many different vector instructions.
710   unsigned UF;
711 
712   /// The builder that we use
713   IRBuilder<> Builder;
714 
715   // --- Vectorization state ---
716 
717   /// The vector-loop preheader.
718   BasicBlock *LoopVectorPreHeader;
719 
720   /// The scalar-loop preheader.
721   BasicBlock *LoopScalarPreHeader;
722 
723   /// Middle Block between the vector and the scalar.
724   BasicBlock *LoopMiddleBlock;
725 
726   /// The unique ExitBlock of the scalar loop if one exists.  Note that
727   /// there can be multiple exiting edges reaching this block.
728   BasicBlock *LoopExitBlock;
729 
730   /// The scalar loop body.
731   BasicBlock *LoopScalarBody;
732 
733   /// A list of all bypass blocks. The first block is the entry of the loop.
734   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
735 
736   /// Store instructions that were predicated.
737   SmallVector<Instruction *, 4> PredicatedInstructions;
738 
739   /// Trip count of the original loop.
740   Value *TripCount = nullptr;
741 
742   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
743   Value *VectorTripCount = nullptr;
744 
745   /// The legality analysis.
746   LoopVectorizationLegality *Legal;
747 
748   /// The profitablity analysis.
749   LoopVectorizationCostModel *Cost;
750 
751   // Record whether runtime checks are added.
752   bool AddedSafetyChecks = false;
753 
754   // Holds the end values for each induction variable. We save the end values
755   // so we can later fix-up the external users of the induction variables.
756   DenseMap<PHINode *, Value *> IVEndValues;
757 
758   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
759   // fixed up at the end of vector code generation.
760   SmallVector<PHINode *, 8> OrigPHIsToFix;
761 
762   /// BFI and PSI are used to check for profile guided size optimizations.
763   BlockFrequencyInfo *BFI;
764   ProfileSummaryInfo *PSI;
765 
766   // Whether this loop should be optimized for size based on profile guided size
767   // optimizatios.
768   bool OptForSizeBasedOnProfile;
769 
770   /// Structure to hold information about generated runtime checks, responsible
771   /// for cleaning the checks, if vectorization turns out unprofitable.
772   GeneratedRTChecks &RTChecks;
773 
774   // Holds the resume values for reductions in the loops, used to set the
775   // correct start value of reduction PHIs when vectorizing the epilogue.
776   SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
777       ReductionResumeValues;
778 };
779 
780 class InnerLoopUnroller : public InnerLoopVectorizer {
781 public:
782   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
783                     LoopInfo *LI, DominatorTree *DT,
784                     const TargetLibraryInfo *TLI,
785                     const TargetTransformInfo *TTI, AssumptionCache *AC,
786                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
787                     LoopVectorizationLegality *LVL,
788                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
789                     ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
790       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
791                             ElementCount::getFixed(1), UnrollFactor, LVL, CM,
792                             BFI, PSI, Check) {}
793 
794 private:
795   Value *getBroadcastInstrs(Value *V) override;
796 };
797 
798 /// Encapsulate information regarding vectorization of a loop and its epilogue.
799 /// This information is meant to be updated and used across two stages of
800 /// epilogue vectorization.
801 struct EpilogueLoopVectorizationInfo {
802   ElementCount MainLoopVF = ElementCount::getFixed(0);
803   unsigned MainLoopUF = 0;
804   ElementCount EpilogueVF = ElementCount::getFixed(0);
805   unsigned EpilogueUF = 0;
806   BasicBlock *MainLoopIterationCountCheck = nullptr;
807   BasicBlock *EpilogueIterationCountCheck = nullptr;
808   BasicBlock *SCEVSafetyCheck = nullptr;
809   BasicBlock *MemSafetyCheck = nullptr;
810   Value *TripCount = nullptr;
811   Value *VectorTripCount = nullptr;
812 
813   EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
814                                 ElementCount EVF, unsigned EUF)
815       : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
816     assert(EUF == 1 &&
817            "A high UF for the epilogue loop is likely not beneficial.");
818   }
819 };
820 
821 /// An extension of the inner loop vectorizer that creates a skeleton for a
822 /// vectorized loop that has its epilogue (residual) also vectorized.
823 /// The idea is to run the vplan on a given loop twice, firstly to setup the
824 /// skeleton and vectorize the main loop, and secondly to complete the skeleton
825 /// from the first step and vectorize the epilogue.  This is achieved by
826 /// deriving two concrete strategy classes from this base class and invoking
827 /// them in succession from the loop vectorizer planner.
828 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
829 public:
830   InnerLoopAndEpilogueVectorizer(
831       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
832       DominatorTree *DT, const TargetLibraryInfo *TLI,
833       const TargetTransformInfo *TTI, AssumptionCache *AC,
834       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
835       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
836       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
837       GeneratedRTChecks &Checks)
838       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
839                             EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
840                             Checks),
841         EPI(EPI) {}
842 
843   // Override this function to handle the more complex control flow around the
844   // three loops.
845   std::pair<BasicBlock *, Value *>
846   createVectorizedLoopSkeleton() final override {
847     return createEpilogueVectorizedLoopSkeleton();
848   }
849 
850   /// The interface for creating a vectorized skeleton using one of two
851   /// different strategies, each corresponding to one execution of the vplan
852   /// as described above.
853   virtual std::pair<BasicBlock *, Value *>
854   createEpilogueVectorizedLoopSkeleton() = 0;
855 
856   /// Holds and updates state information required to vectorize the main loop
857   /// and its epilogue in two separate passes. This setup helps us avoid
858   /// regenerating and recomputing runtime safety checks. It also helps us to
859   /// shorten the iteration-count-check path length for the cases where the
860   /// iteration count of the loop is so small that the main vector loop is
861   /// completely skipped.
862   EpilogueLoopVectorizationInfo &EPI;
863 };
864 
865 /// A specialized derived class of inner loop vectorizer that performs
866 /// vectorization of *main* loops in the process of vectorizing loops and their
867 /// epilogues.
868 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
869 public:
870   EpilogueVectorizerMainLoop(
871       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
872       DominatorTree *DT, const TargetLibraryInfo *TLI,
873       const TargetTransformInfo *TTI, AssumptionCache *AC,
874       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
875       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
876       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
877       GeneratedRTChecks &Check)
878       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
879                                        EPI, LVL, CM, BFI, PSI, Check) {}
880   /// Implements the interface for creating a vectorized skeleton using the
881   /// *main loop* strategy (ie the first pass of vplan execution).
882   std::pair<BasicBlock *, Value *>
883   createEpilogueVectorizedLoopSkeleton() final override;
884 
885 protected:
886   /// Emits an iteration count bypass check once for the main loop (when \p
887   /// ForEpilogue is false) and once for the epilogue loop (when \p
888   /// ForEpilogue is true).
889   BasicBlock *emitMinimumIterationCountCheck(BasicBlock *Bypass,
890                                              bool ForEpilogue);
891   void printDebugTracesAtStart() override;
892   void printDebugTracesAtEnd() override;
893 };
894 
895 // A specialized derived class of inner loop vectorizer that performs
896 // vectorization of *epilogue* loops in the process of vectorizing loops and
897 // their epilogues.
898 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
899 public:
900   EpilogueVectorizerEpilogueLoop(
901       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
902       DominatorTree *DT, const TargetLibraryInfo *TLI,
903       const TargetTransformInfo *TTI, AssumptionCache *AC,
904       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
905       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
906       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
907       GeneratedRTChecks &Checks)
908       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
909                                        EPI, LVL, CM, BFI, PSI, Checks) {
910     TripCount = EPI.TripCount;
911   }
912   /// Implements the interface for creating a vectorized skeleton using the
913   /// *epilogue loop* strategy (ie the second pass of vplan execution).
914   std::pair<BasicBlock *, Value *>
915   createEpilogueVectorizedLoopSkeleton() final override;
916 
917 protected:
918   /// Emits an iteration count bypass check after the main vector loop has
919   /// finished to see if there are any iterations left to execute by either
920   /// the vector epilogue or the scalar epilogue.
921   BasicBlock *emitMinimumVectorEpilogueIterCountCheck(
922                                                       BasicBlock *Bypass,
923                                                       BasicBlock *Insert);
924   void printDebugTracesAtStart() override;
925   void printDebugTracesAtEnd() override;
926 };
927 } // end namespace llvm
928 
929 /// Look for a meaningful debug location on the instruction or it's
930 /// operands.
931 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
932   if (!I)
933     return I;
934 
935   DebugLoc Empty;
936   if (I->getDebugLoc() != Empty)
937     return I;
938 
939   for (Use &Op : I->operands()) {
940     if (Instruction *OpInst = dyn_cast<Instruction>(Op))
941       if (OpInst->getDebugLoc() != Empty)
942         return OpInst;
943   }
944 
945   return I;
946 }
947 
948 void InnerLoopVectorizer::setDebugLocFromInst(
949     const Value *V, Optional<IRBuilderBase *> CustomBuilder) {
950   IRBuilderBase *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
951   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
952     const DILocation *DIL = Inst->getDebugLoc();
953 
954     // When a FSDiscriminator is enabled, we don't need to add the multiply
955     // factors to the discriminators.
956     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
957         !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
958       // FIXME: For scalable vectors, assume vscale=1.
959       auto NewDIL =
960           DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
961       if (NewDIL)
962         B->SetCurrentDebugLocation(NewDIL.getValue());
963       else
964         LLVM_DEBUG(dbgs()
965                    << "Failed to create new discriminator: "
966                    << DIL->getFilename() << " Line: " << DIL->getLine());
967     } else
968       B->SetCurrentDebugLocation(DIL);
969   } else
970     B->SetCurrentDebugLocation(DebugLoc());
971 }
972 
973 /// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
974 /// is passed, the message relates to that particular instruction.
975 #ifndef NDEBUG
976 static void debugVectorizationMessage(const StringRef Prefix,
977                                       const StringRef DebugMsg,
978                                       Instruction *I) {
979   dbgs() << "LV: " << Prefix << DebugMsg;
980   if (I != nullptr)
981     dbgs() << " " << *I;
982   else
983     dbgs() << '.';
984   dbgs() << '\n';
985 }
986 #endif
987 
988 /// Create an analysis remark that explains why vectorization failed
989 ///
990 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
991 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
992 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
993 /// the location of the remark.  \return the remark object that can be
994 /// streamed to.
995 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
996     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
997   Value *CodeRegion = TheLoop->getHeader();
998   DebugLoc DL = TheLoop->getStartLoc();
999 
1000   if (I) {
1001     CodeRegion = I->getParent();
1002     // If there is no debug location attached to the instruction, revert back to
1003     // using the loop's.
1004     if (I->getDebugLoc())
1005       DL = I->getDebugLoc();
1006   }
1007 
1008   return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
1009 }
1010 
1011 namespace llvm {
1012 
1013 /// Return a value for Step multiplied by VF.
1014 Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
1015                        int64_t Step) {
1016   assert(Ty->isIntegerTy() && "Expected an integer step");
1017   Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
1018   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1019 }
1020 
1021 /// Return the runtime value for VF.
1022 Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
1023   Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
1024   return VF.isScalable() ? B.CreateVScale(EC) : EC;
1025 }
1026 
1027 static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
1028                                   ElementCount VF) {
1029   assert(FTy->isFloatingPointTy() && "Expected floating point type!");
1030   Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
1031   Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
1032   return B.CreateUIToFP(RuntimeVF, FTy);
1033 }
1034 
1035 void reportVectorizationFailure(const StringRef DebugMsg,
1036                                 const StringRef OREMsg, const StringRef ORETag,
1037                                 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1038                                 Instruction *I) {
1039   LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
1040   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1041   ORE->emit(
1042       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1043       << "loop not vectorized: " << OREMsg);
1044 }
1045 
1046 void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
1047                              OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1048                              Instruction *I) {
1049   LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
1050   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1051   ORE->emit(
1052       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1053       << Msg);
1054 }
1055 
1056 } // end namespace llvm
1057 
1058 #ifndef NDEBUG
1059 /// \return string containing a file name and a line # for the given loop.
1060 static std::string getDebugLocString(const Loop *L) {
1061   std::string Result;
1062   if (L) {
1063     raw_string_ostream OS(Result);
1064     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1065       LoopDbgLoc.print(OS);
1066     else
1067       // Just print the module name.
1068       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1069     OS.flush();
1070   }
1071   return Result;
1072 }
1073 #endif
1074 
1075 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1076                                          const Instruction *Orig) {
1077   // If the loop was versioned with memchecks, add the corresponding no-alias
1078   // metadata.
1079   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1080     LVer->annotateInstWithNoAlias(To, Orig);
1081 }
1082 
1083 void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
1084     VPTransformState &State) {
1085 
1086   // Collect recipes in the backward slice of `Root` that may generate a poison
1087   // value that is used after vectorization.
1088   SmallPtrSet<VPRecipeBase *, 16> Visited;
1089   auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
1090     SmallVector<VPRecipeBase *, 16> Worklist;
1091     Worklist.push_back(Root);
1092 
1093     // Traverse the backward slice of Root through its use-def chain.
1094     while (!Worklist.empty()) {
1095       VPRecipeBase *CurRec = Worklist.back();
1096       Worklist.pop_back();
1097 
1098       if (!Visited.insert(CurRec).second)
1099         continue;
1100 
1101       // Prune search if we find another recipe generating a widen memory
1102       // instruction. Widen memory instructions involved in address computation
1103       // will lead to gather/scatter instructions, which don't need to be
1104       // handled.
1105       if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
1106           isa<VPInterleaveRecipe>(CurRec) ||
1107           isa<VPScalarIVStepsRecipe>(CurRec) ||
1108           isa<VPCanonicalIVPHIRecipe>(CurRec))
1109         continue;
1110 
1111       // This recipe contributes to the address computation of a widen
1112       // load/store. Collect recipe if its underlying instruction has
1113       // poison-generating flags.
1114       Instruction *Instr = CurRec->getUnderlyingInstr();
1115       if (Instr && Instr->hasPoisonGeneratingFlags())
1116         State.MayGeneratePoisonRecipes.insert(CurRec);
1117 
1118       // Add new definitions to the worklist.
1119       for (VPValue *operand : CurRec->operands())
1120         if (VPDef *OpDef = operand->getDef())
1121           Worklist.push_back(cast<VPRecipeBase>(OpDef));
1122     }
1123   });
1124 
1125   // Traverse all the recipes in the VPlan and collect the poison-generating
1126   // recipes in the backward slice starting at the address of a VPWidenRecipe or
1127   // VPInterleaveRecipe.
1128   auto Iter = depth_first(
1129       VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
1130   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
1131     for (VPRecipeBase &Recipe : *VPBB) {
1132       if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
1133         Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
1134         VPDef *AddrDef = WidenRec->getAddr()->getDef();
1135         if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
1136             Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
1137           collectPoisonGeneratingInstrsInBackwardSlice(
1138               cast<VPRecipeBase>(AddrDef));
1139       } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
1140         VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
1141         if (AddrDef) {
1142           // Check if any member of the interleave group needs predication.
1143           const InterleaveGroup<Instruction> *InterGroup =
1144               InterleaveRec->getInterleaveGroup();
1145           bool NeedPredication = false;
1146           for (int I = 0, NumMembers = InterGroup->getNumMembers();
1147                I < NumMembers; ++I) {
1148             Instruction *Member = InterGroup->getMember(I);
1149             if (Member)
1150               NeedPredication |=
1151                   Legal->blockNeedsPredication(Member->getParent());
1152           }
1153 
1154           if (NeedPredication)
1155             collectPoisonGeneratingInstrsInBackwardSlice(
1156                 cast<VPRecipeBase>(AddrDef));
1157         }
1158       }
1159     }
1160   }
1161 }
1162 
1163 void InnerLoopVectorizer::addMetadata(Instruction *To,
1164                                       Instruction *From) {
1165   propagateMetadata(To, From);
1166   addNewMetadata(To, From);
1167 }
1168 
1169 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1170                                       Instruction *From) {
1171   for (Value *V : To) {
1172     if (Instruction *I = dyn_cast<Instruction>(V))
1173       addMetadata(I, From);
1174   }
1175 }
1176 
1177 PHINode *InnerLoopVectorizer::getReductionResumeValue(
1178     const RecurrenceDescriptor &RdxDesc) {
1179   auto It = ReductionResumeValues.find(&RdxDesc);
1180   assert(It != ReductionResumeValues.end() &&
1181          "Expected to find a resume value for the reduction.");
1182   return It->second;
1183 }
1184 
1185 namespace llvm {
1186 
1187 // Loop vectorization cost-model hints how the scalar epilogue loop should be
1188 // lowered.
1189 enum ScalarEpilogueLowering {
1190 
1191   // The default: allowing scalar epilogues.
1192   CM_ScalarEpilogueAllowed,
1193 
1194   // Vectorization with OptForSize: don't allow epilogues.
1195   CM_ScalarEpilogueNotAllowedOptSize,
1196 
1197   // A special case of vectorisation with OptForSize: loops with a very small
1198   // trip count are considered for vectorization under OptForSize, thereby
1199   // making sure the cost of their loop body is dominant, free of runtime
1200   // guards and scalar iteration overheads.
1201   CM_ScalarEpilogueNotAllowedLowTripLoop,
1202 
1203   // Loop hint predicate indicating an epilogue is undesired.
1204   CM_ScalarEpilogueNotNeededUsePredicate,
1205 
1206   // Directive indicating we must either tail fold or not vectorize
1207   CM_ScalarEpilogueNotAllowedUsePredicate
1208 };
1209 
1210 /// ElementCountComparator creates a total ordering for ElementCount
1211 /// for the purposes of using it in a set structure.
1212 struct ElementCountComparator {
1213   bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1214     return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1215            std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1216   }
1217 };
1218 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
1219 
1220 /// LoopVectorizationCostModel - estimates the expected speedups due to
1221 /// vectorization.
1222 /// In many cases vectorization is not profitable. This can happen because of
1223 /// a number of reasons. In this class we mainly attempt to predict the
1224 /// expected speedup/slowdowns due to the supported instruction set. We use the
1225 /// TargetTransformInfo to query the different backends for the cost of
1226 /// different operations.
1227 class LoopVectorizationCostModel {
1228 public:
1229   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1230                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1231                              LoopVectorizationLegality *Legal,
1232                              const TargetTransformInfo &TTI,
1233                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1234                              AssumptionCache *AC,
1235                              OptimizationRemarkEmitter *ORE, const Function *F,
1236                              const LoopVectorizeHints *Hints,
1237                              InterleavedAccessInfo &IAI)
1238       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1239         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1240         Hints(Hints), InterleaveInfo(IAI) {}
1241 
1242   /// \return An upper bound for the vectorization factors (both fixed and
1243   /// scalable). If the factors are 0, vectorization and interleaving should be
1244   /// avoided up front.
1245   FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1246 
1247   /// \return True if runtime checks are required for vectorization, and false
1248   /// otherwise.
1249   bool runtimeChecksRequired();
1250 
1251   /// \return The most profitable vectorization factor and the cost of that VF.
1252   /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
1253   /// then this vectorization factor will be selected if vectorization is
1254   /// possible.
1255   VectorizationFactor
1256   selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1257 
1258   VectorizationFactor
1259   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1260                                     const LoopVectorizationPlanner &LVP);
1261 
1262   /// Setup cost-based decisions for user vectorization factor.
1263   /// \return true if the UserVF is a feasible VF to be chosen.
1264   bool selectUserVectorizationFactor(ElementCount UserVF) {
1265     collectUniformsAndScalars(UserVF);
1266     collectInstsToScalarize(UserVF);
1267     return expectedCost(UserVF).first.isValid();
1268   }
1269 
1270   /// \return The size (in bits) of the smallest and widest types in the code
1271   /// that needs to be vectorized. We ignore values that remain scalar such as
1272   /// 64 bit loop indices.
1273   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1274 
1275   /// \return The desired interleave count.
1276   /// If interleave count has been specified by metadata it will be returned.
1277   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1278   /// are the selected vectorization factor and the cost of the selected VF.
1279   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1280 
1281   /// Memory access instruction may be vectorized in more than one way.
1282   /// Form of instruction after vectorization depends on cost.
1283   /// This function takes cost-based decisions for Load/Store instructions
1284   /// and collects them in a map. This decisions map is used for building
1285   /// the lists of loop-uniform and loop-scalar instructions.
1286   /// The calculated cost is saved with widening decision in order to
1287   /// avoid redundant calculations.
1288   void setCostBasedWideningDecision(ElementCount VF);
1289 
1290   /// A struct that represents some properties of the register usage
1291   /// of a loop.
1292   struct RegisterUsage {
1293     /// Holds the number of loop invariant values that are used in the loop.
1294     /// The key is ClassID of target-provided register class.
1295     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1296     /// Holds the maximum number of concurrent live intervals in the loop.
1297     /// The key is ClassID of target-provided register class.
1298     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1299   };
1300 
1301   /// \return Returns information about the register usages of the loop for the
1302   /// given vectorization factors.
1303   SmallVector<RegisterUsage, 8>
1304   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1305 
1306   /// Collect values we want to ignore in the cost model.
1307   void collectValuesToIgnore();
1308 
1309   /// Collect all element types in the loop for which widening is needed.
1310   void collectElementTypesForWidening();
1311 
1312   /// Split reductions into those that happen in the loop, and those that happen
1313   /// outside. In loop reductions are collected into InLoopReductionChains.
1314   void collectInLoopReductions();
1315 
1316   /// Returns true if we should use strict in-order reductions for the given
1317   /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
1318   /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
1319   /// of FP operations.
1320   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1321     return !Hints->allowReordering() && RdxDesc.isOrdered();
1322   }
1323 
1324   /// \returns The smallest bitwidth each instruction can be represented with.
1325   /// The vector equivalents of these instructions should be truncated to this
1326   /// type.
1327   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1328     return MinBWs;
1329   }
1330 
1331   /// \returns True if it is more profitable to scalarize instruction \p I for
1332   /// vectorization factor \p VF.
1333   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1334     assert(VF.isVector() &&
1335            "Profitable to scalarize relevant only for VF > 1.");
1336 
1337     // Cost model is not run in the VPlan-native path - return conservative
1338     // result until this changes.
1339     if (EnableVPlanNativePath)
1340       return false;
1341 
1342     auto Scalars = InstsToScalarize.find(VF);
1343     assert(Scalars != InstsToScalarize.end() &&
1344            "VF not yet analyzed for scalarization profitability");
1345     return Scalars->second.find(I) != Scalars->second.end();
1346   }
1347 
1348   /// Returns true if \p I is known to be uniform after vectorization.
1349   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1350     if (VF.isScalar())
1351       return true;
1352 
1353     // Cost model is not run in the VPlan-native path - return conservative
1354     // result until this changes.
1355     if (EnableVPlanNativePath)
1356       return false;
1357 
1358     auto UniformsPerVF = Uniforms.find(VF);
1359     assert(UniformsPerVF != Uniforms.end() &&
1360            "VF not yet analyzed for uniformity");
1361     return UniformsPerVF->second.count(I);
1362   }
1363 
1364   /// Returns true if \p I is known to be scalar after vectorization.
1365   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1366     if (VF.isScalar())
1367       return true;
1368 
1369     // Cost model is not run in the VPlan-native path - return conservative
1370     // result until this changes.
1371     if (EnableVPlanNativePath)
1372       return false;
1373 
1374     auto ScalarsPerVF = Scalars.find(VF);
1375     assert(ScalarsPerVF != Scalars.end() &&
1376            "Scalar values are not calculated for VF");
1377     return ScalarsPerVF->second.count(I);
1378   }
1379 
1380   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1381   /// for vectorization factor \p VF.
1382   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1383     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1384            !isProfitableToScalarize(I, VF) &&
1385            !isScalarAfterVectorization(I, VF);
1386   }
1387 
1388   /// Decision that was taken during cost calculation for memory instruction.
1389   enum InstWidening {
1390     CM_Unknown,
1391     CM_Widen,         // For consecutive accesses with stride +1.
1392     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1393     CM_Interleave,
1394     CM_GatherScatter,
1395     CM_Scalarize
1396   };
1397 
1398   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1399   /// instruction \p I and vector width \p VF.
1400   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1401                            InstructionCost Cost) {
1402     assert(VF.isVector() && "Expected VF >=2");
1403     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1404   }
1405 
1406   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1407   /// interleaving group \p Grp and vector width \p VF.
1408   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1409                            ElementCount VF, InstWidening W,
1410                            InstructionCost Cost) {
1411     assert(VF.isVector() && "Expected VF >=2");
1412     /// Broadcast this decicion to all instructions inside the group.
1413     /// But the cost will be assigned to one instruction only.
1414     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1415       if (auto *I = Grp->getMember(i)) {
1416         if (Grp->getInsertPos() == I)
1417           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1418         else
1419           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1420       }
1421     }
1422   }
1423 
1424   /// Return the cost model decision for the given instruction \p I and vector
1425   /// width \p VF. Return CM_Unknown if this instruction did not pass
1426   /// through the cost modeling.
1427   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1428     assert(VF.isVector() && "Expected VF to be a vector VF");
1429     // Cost model is not run in the VPlan-native path - return conservative
1430     // result until this changes.
1431     if (EnableVPlanNativePath)
1432       return CM_GatherScatter;
1433 
1434     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1435     auto Itr = WideningDecisions.find(InstOnVF);
1436     if (Itr == WideningDecisions.end())
1437       return CM_Unknown;
1438     return Itr->second.first;
1439   }
1440 
1441   /// Return the vectorization cost for the given instruction \p I and vector
1442   /// width \p VF.
1443   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1444     assert(VF.isVector() && "Expected VF >=2");
1445     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1446     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1447            "The cost is not calculated");
1448     return WideningDecisions[InstOnVF].second;
1449   }
1450 
1451   /// Return True if instruction \p I is an optimizable truncate whose operand
1452   /// is an induction variable. Such a truncate will be removed by adding a new
1453   /// induction variable with the destination type.
1454   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1455     // If the instruction is not a truncate, return false.
1456     auto *Trunc = dyn_cast<TruncInst>(I);
1457     if (!Trunc)
1458       return false;
1459 
1460     // Get the source and destination types of the truncate.
1461     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1462     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1463 
1464     // If the truncate is free for the given types, return false. Replacing a
1465     // free truncate with an induction variable would add an induction variable
1466     // update instruction to each iteration of the loop. We exclude from this
1467     // check the primary induction variable since it will need an update
1468     // instruction regardless.
1469     Value *Op = Trunc->getOperand(0);
1470     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1471       return false;
1472 
1473     // If the truncated value is not an induction variable, return false.
1474     return Legal->isInductionPhi(Op);
1475   }
1476 
1477   /// Collects the instructions to scalarize for each predicated instruction in
1478   /// the loop.
1479   void collectInstsToScalarize(ElementCount VF);
1480 
1481   /// Collect Uniform and Scalar values for the given \p VF.
1482   /// The sets depend on CM decision for Load/Store instructions
1483   /// that may be vectorized as interleave, gather-scatter or scalarized.
1484   void collectUniformsAndScalars(ElementCount VF) {
1485     // Do the analysis once.
1486     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1487       return;
1488     setCostBasedWideningDecision(VF);
1489     collectLoopUniforms(VF);
1490     collectLoopScalars(VF);
1491   }
1492 
1493   /// Returns true if the target machine supports masked store operation
1494   /// for the given \p DataType and kind of access to \p Ptr.
1495   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1496     return Legal->isConsecutivePtr(DataType, Ptr) &&
1497            TTI.isLegalMaskedStore(DataType, Alignment);
1498   }
1499 
1500   /// Returns true if the target machine supports masked load operation
1501   /// for the given \p DataType and kind of access to \p Ptr.
1502   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1503     return Legal->isConsecutivePtr(DataType, Ptr) &&
1504            TTI.isLegalMaskedLoad(DataType, Alignment);
1505   }
1506 
1507   /// Returns true if the target machine can represent \p V as a masked gather
1508   /// or scatter operation.
1509   bool isLegalGatherOrScatter(Value *V,
1510                               ElementCount VF = ElementCount::getFixed(1)) {
1511     bool LI = isa<LoadInst>(V);
1512     bool SI = isa<StoreInst>(V);
1513     if (!LI && !SI)
1514       return false;
1515     auto *Ty = getLoadStoreType(V);
1516     Align Align = getLoadStoreAlignment(V);
1517     if (VF.isVector())
1518       Ty = VectorType::get(Ty, VF);
1519     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1520            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1521   }
1522 
1523   /// Returns true if the target machine supports all of the reduction
1524   /// variables found for the given VF.
1525   bool canVectorizeReductions(ElementCount VF) const {
1526     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1527       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1528       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1529     }));
1530   }
1531 
1532   /// Returns true if \p I is an instruction that will be scalarized with
1533   /// predication when vectorizing \p I with vectorization factor \p VF. Such
1534   /// instructions include conditional stores and instructions that may divide
1535   /// by zero.
1536   bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
1537 
1538   // Returns true if \p I is an instruction that will be predicated either
1539   // through scalar predication or masked load/store or masked gather/scatter.
1540   // \p VF is the vectorization factor that will be used to vectorize \p I.
1541   // Superset of instructions that return true for isScalarWithPredication.
1542   bool isPredicatedInst(Instruction *I, ElementCount VF,
1543                         bool IsKnownUniform = false) {
1544     // When we know the load is uniform and the original scalar loop was not
1545     // predicated we don't need to mark it as a predicated instruction. Any
1546     // vectorised blocks created when tail-folding are something artificial we
1547     // have introduced and we know there is always at least one active lane.
1548     // That's why we call Legal->blockNeedsPredication here because it doesn't
1549     // query tail-folding.
1550     if (IsKnownUniform && isa<LoadInst>(I) &&
1551         !Legal->blockNeedsPredication(I->getParent()))
1552       return false;
1553     if (!blockNeedsPredicationForAnyReason(I->getParent()))
1554       return false;
1555     // Loads and stores that need some form of masked operation are predicated
1556     // instructions.
1557     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1558       return Legal->isMaskRequired(I);
1559     return isScalarWithPredication(I, VF);
1560   }
1561 
1562   /// Returns true if \p I is a memory instruction with consecutive memory
1563   /// access that can be widened.
1564   bool
1565   memoryInstructionCanBeWidened(Instruction *I,
1566                                 ElementCount VF = ElementCount::getFixed(1));
1567 
1568   /// Returns true if \p I is a memory instruction in an interleaved-group
1569   /// of memory accesses that can be vectorized with wide vector loads/stores
1570   /// and shuffles.
1571   bool
1572   interleavedAccessCanBeWidened(Instruction *I,
1573                                 ElementCount VF = ElementCount::getFixed(1));
1574 
1575   /// Check if \p Instr belongs to any interleaved access group.
1576   bool isAccessInterleaved(Instruction *Instr) {
1577     return InterleaveInfo.isInterleaved(Instr);
1578   }
1579 
1580   /// Get the interleaved access group that \p Instr belongs to.
1581   const InterleaveGroup<Instruction> *
1582   getInterleavedAccessGroup(Instruction *Instr) {
1583     return InterleaveInfo.getInterleaveGroup(Instr);
1584   }
1585 
1586   /// Returns true if we're required to use a scalar epilogue for at least
1587   /// the final iteration of the original loop.
1588   bool requiresScalarEpilogue(ElementCount VF) const {
1589     if (!isScalarEpilogueAllowed())
1590       return false;
1591     // If we might exit from anywhere but the latch, must run the exiting
1592     // iteration in scalar form.
1593     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1594       return true;
1595     return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1596   }
1597 
1598   /// Returns true if a scalar epilogue is not allowed due to optsize or a
1599   /// loop hint annotation.
1600   bool isScalarEpilogueAllowed() const {
1601     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1602   }
1603 
1604   /// Returns true if all loop blocks should be masked to fold tail loop.
1605   bool foldTailByMasking() const { return FoldTailByMasking; }
1606 
1607   /// Returns true if the instructions in this block requires predication
1608   /// for any reason, e.g. because tail folding now requires a predicate
1609   /// or because the block in the original loop was predicated.
1610   bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1611     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1612   }
1613 
1614   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1615   /// nodes to the chain of instructions representing the reductions. Uses a
1616   /// MapVector to ensure deterministic iteration order.
1617   using ReductionChainMap =
1618       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1619 
1620   /// Return the chain of instructions representing an inloop reduction.
1621   const ReductionChainMap &getInLoopReductionChains() const {
1622     return InLoopReductionChains;
1623   }
1624 
1625   /// Returns true if the Phi is part of an inloop reduction.
1626   bool isInLoopReduction(PHINode *Phi) const {
1627     return InLoopReductionChains.count(Phi);
1628   }
1629 
1630   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1631   /// with factor VF.  Return the cost of the instruction, including
1632   /// scalarization overhead if it's needed.
1633   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1634 
1635   /// Estimate cost of a call instruction CI if it were vectorized with factor
1636   /// VF. Return the cost of the instruction, including scalarization overhead
1637   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1638   /// scalarized -
1639   /// i.e. either vector version isn't available, or is too expensive.
1640   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1641                                     bool &NeedToScalarize) const;
1642 
1643   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1644   /// that of B.
1645   bool isMoreProfitable(const VectorizationFactor &A,
1646                         const VectorizationFactor &B) const;
1647 
1648   /// Invalidates decisions already taken by the cost model.
1649   void invalidateCostModelingDecisions() {
1650     WideningDecisions.clear();
1651     Uniforms.clear();
1652     Scalars.clear();
1653   }
1654 
1655 private:
1656   unsigned NumPredStores = 0;
1657 
1658   /// Convenience function that returns the value of vscale_range iff
1659   /// vscale_range.min == vscale_range.max or otherwise returns the value
1660   /// returned by the corresponding TLI method.
1661   Optional<unsigned> getVScaleForTuning() const;
1662 
1663   /// \return An upper bound for the vectorization factors for both
1664   /// fixed and scalable vectorization, where the minimum-known number of
1665   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1666   /// disabled or unsupported, then the scalable part will be equal to
1667   /// ElementCount::getScalable(0).
1668   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1669                                            ElementCount UserVF,
1670                                            bool FoldTailByMasking);
1671 
1672   /// \return the maximized element count based on the targets vector
1673   /// registers and the loop trip-count, but limited to a maximum safe VF.
1674   /// This is a helper function of computeFeasibleMaxVF.
1675   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1676   /// issue that occurred on one of the buildbots which cannot be reproduced
1677   /// without having access to the properietary compiler (see comments on
1678   /// D98509). The issue is currently under investigation and this workaround
1679   /// will be removed as soon as possible.
1680   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1681                                        unsigned SmallestType,
1682                                        unsigned WidestType,
1683                                        const ElementCount &MaxSafeVF,
1684                                        bool FoldTailByMasking);
1685 
1686   /// \return the maximum legal scalable VF, based on the safe max number
1687   /// of elements.
1688   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1689 
1690   /// The vectorization cost is a combination of the cost itself and a boolean
1691   /// indicating whether any of the contributing operations will actually
1692   /// operate on vector values after type legalization in the backend. If this
1693   /// latter value is false, then all operations will be scalarized (i.e. no
1694   /// vectorization has actually taken place).
1695   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1696 
1697   /// Returns the expected execution cost. The unit of the cost does
1698   /// not matter because we use the 'cost' units to compare different
1699   /// vector widths. The cost that is returned is *not* normalized by
1700   /// the factor width. If \p Invalid is not nullptr, this function
1701   /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1702   /// each instruction that has an Invalid cost for the given VF.
1703   using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1704   VectorizationCostTy
1705   expectedCost(ElementCount VF,
1706                SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1707 
1708   /// Returns the execution time cost of an instruction for a given vector
1709   /// width. Vector width of one means scalar.
1710   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1711 
1712   /// The cost-computation logic from getInstructionCost which provides
1713   /// the vector type as an output parameter.
1714   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1715                                      Type *&VectorTy);
1716 
1717   /// Return the cost of instructions in an inloop reduction pattern, if I is
1718   /// part of that pattern.
1719   Optional<InstructionCost>
1720   getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1721                           TTI::TargetCostKind CostKind);
1722 
1723   /// Calculate vectorization cost of memory instruction \p I.
1724   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1725 
1726   /// The cost computation for scalarized memory instruction.
1727   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1728 
1729   /// The cost computation for interleaving group of memory instructions.
1730   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1731 
1732   /// The cost computation for Gather/Scatter instruction.
1733   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1734 
1735   /// The cost computation for widening instruction \p I with consecutive
1736   /// memory access.
1737   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1738 
1739   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1740   /// Load: scalar load + broadcast.
1741   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1742   /// element)
1743   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1744 
1745   /// Estimate the overhead of scalarizing an instruction. This is a
1746   /// convenience wrapper for the type-based getScalarizationOverhead API.
1747   InstructionCost getScalarizationOverhead(Instruction *I,
1748                                            ElementCount VF) const;
1749 
1750   /// Returns whether the instruction is a load or store and will be a emitted
1751   /// as a vector operation.
1752   bool isConsecutiveLoadOrStore(Instruction *I);
1753 
1754   /// Returns true if an artificially high cost for emulated masked memrefs
1755   /// should be used.
1756   bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1757 
1758   /// Map of scalar integer values to the smallest bitwidth they can be legally
1759   /// represented as. The vector equivalents of these values should be truncated
1760   /// to this type.
1761   MapVector<Instruction *, uint64_t> MinBWs;
1762 
1763   /// A type representing the costs for instructions if they were to be
1764   /// scalarized rather than vectorized. The entries are Instruction-Cost
1765   /// pairs.
1766   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1767 
1768   /// A set containing all BasicBlocks that are known to present after
1769   /// vectorization as a predicated block.
1770   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1771 
1772   /// Records whether it is allowed to have the original scalar loop execute at
1773   /// least once. This may be needed as a fallback loop in case runtime
1774   /// aliasing/dependence checks fail, or to handle the tail/remainder
1775   /// iterations when the trip count is unknown or doesn't divide by the VF,
1776   /// or as a peel-loop to handle gaps in interleave-groups.
1777   /// Under optsize and when the trip count is very small we don't allow any
1778   /// iterations to execute in the scalar loop.
1779   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1780 
1781   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1782   bool FoldTailByMasking = false;
1783 
1784   /// A map holding scalar costs for different vectorization factors. The
1785   /// presence of a cost for an instruction in the mapping indicates that the
1786   /// instruction will be scalarized when vectorizing with the associated
1787   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1788   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1789 
1790   /// Holds the instructions known to be uniform after vectorization.
1791   /// The data is collected per VF.
1792   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1793 
1794   /// Holds the instructions known to be scalar after vectorization.
1795   /// The data is collected per VF.
1796   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1797 
1798   /// Holds the instructions (address computations) that are forced to be
1799   /// scalarized.
1800   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1801 
1802   /// PHINodes of the reductions that should be expanded in-loop along with
1803   /// their associated chains of reduction operations, in program order from top
1804   /// (PHI) to bottom
1805   ReductionChainMap InLoopReductionChains;
1806 
1807   /// A Map of inloop reduction operations and their immediate chain operand.
1808   /// FIXME: This can be removed once reductions can be costed correctly in
1809   /// vplan. This was added to allow quick lookup to the inloop operations,
1810   /// without having to loop through InLoopReductionChains.
1811   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1812 
1813   /// Returns the expected difference in cost from scalarizing the expression
1814   /// feeding a predicated instruction \p PredInst. The instructions to
1815   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1816   /// non-negative return value implies the expression will be scalarized.
1817   /// Currently, only single-use chains are considered for scalarization.
1818   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1819                               ElementCount VF);
1820 
1821   /// Collect the instructions that are uniform after vectorization. An
1822   /// instruction is uniform if we represent it with a single scalar value in
1823   /// the vectorized loop corresponding to each vector iteration. Examples of
1824   /// uniform instructions include pointer operands of consecutive or
1825   /// interleaved memory accesses. Note that although uniformity implies an
1826   /// instruction will be scalar, the reverse is not true. In general, a
1827   /// scalarized instruction will be represented by VF scalar values in the
1828   /// vectorized loop, each corresponding to an iteration of the original
1829   /// scalar loop.
1830   void collectLoopUniforms(ElementCount VF);
1831 
1832   /// Collect the instructions that are scalar after vectorization. An
1833   /// instruction is scalar if it is known to be uniform or will be scalarized
1834   /// during vectorization. collectLoopScalars should only add non-uniform nodes
1835   /// to the list if they are used by a load/store instruction that is marked as
1836   /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1837   /// VF values in the vectorized loop, each corresponding to an iteration of
1838   /// the original scalar loop.
1839   void collectLoopScalars(ElementCount VF);
1840 
1841   /// Keeps cost model vectorization decision and cost for instructions.
1842   /// Right now it is used for memory instructions only.
1843   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1844                                 std::pair<InstWidening, InstructionCost>>;
1845 
1846   DecisionList WideningDecisions;
1847 
1848   /// Returns true if \p V is expected to be vectorized and it needs to be
1849   /// extracted.
1850   bool needsExtract(Value *V, ElementCount VF) const {
1851     Instruction *I = dyn_cast<Instruction>(V);
1852     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1853         TheLoop->isLoopInvariant(I))
1854       return false;
1855 
1856     // Assume we can vectorize V (and hence we need extraction) if the
1857     // scalars are not computed yet. This can happen, because it is called
1858     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1859     // the scalars are collected. That should be a safe assumption in most
1860     // cases, because we check if the operands have vectorizable types
1861     // beforehand in LoopVectorizationLegality.
1862     return Scalars.find(VF) == Scalars.end() ||
1863            !isScalarAfterVectorization(I, VF);
1864   };
1865 
1866   /// Returns a range containing only operands needing to be extracted.
1867   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1868                                                    ElementCount VF) const {
1869     return SmallVector<Value *, 4>(make_filter_range(
1870         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1871   }
1872 
1873   /// Determines if we have the infrastructure to vectorize loop \p L and its
1874   /// epilogue, assuming the main loop is vectorized by \p VF.
1875   bool isCandidateForEpilogueVectorization(const Loop &L,
1876                                            const ElementCount VF) const;
1877 
1878   /// Returns true if epilogue vectorization is considered profitable, and
1879   /// false otherwise.
1880   /// \p VF is the vectorization factor chosen for the original loop.
1881   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1882 
1883 public:
1884   /// The loop that we evaluate.
1885   Loop *TheLoop;
1886 
1887   /// Predicated scalar evolution analysis.
1888   PredicatedScalarEvolution &PSE;
1889 
1890   /// Loop Info analysis.
1891   LoopInfo *LI;
1892 
1893   /// Vectorization legality.
1894   LoopVectorizationLegality *Legal;
1895 
1896   /// Vector target information.
1897   const TargetTransformInfo &TTI;
1898 
1899   /// Target Library Info.
1900   const TargetLibraryInfo *TLI;
1901 
1902   /// Demanded bits analysis.
1903   DemandedBits *DB;
1904 
1905   /// Assumption cache.
1906   AssumptionCache *AC;
1907 
1908   /// Interface to emit optimization remarks.
1909   OptimizationRemarkEmitter *ORE;
1910 
1911   const Function *TheFunction;
1912 
1913   /// Loop Vectorize Hint.
1914   const LoopVectorizeHints *Hints;
1915 
1916   /// The interleave access information contains groups of interleaved accesses
1917   /// with the same stride and close to each other.
1918   InterleavedAccessInfo &InterleaveInfo;
1919 
1920   /// Values to ignore in the cost model.
1921   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1922 
1923   /// Values to ignore in the cost model when VF > 1.
1924   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1925 
1926   /// All element types found in the loop.
1927   SmallPtrSet<Type *, 16> ElementTypesInLoop;
1928 
1929   /// Profitable vector factors.
1930   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1931 };
1932 } // end namespace llvm
1933 
1934 /// Helper struct to manage generating runtime checks for vectorization.
1935 ///
1936 /// The runtime checks are created up-front in temporary blocks to allow better
1937 /// estimating the cost and un-linked from the existing IR. After deciding to
1938 /// vectorize, the checks are moved back. If deciding not to vectorize, the
1939 /// temporary blocks are completely removed.
1940 class GeneratedRTChecks {
1941   /// Basic block which contains the generated SCEV checks, if any.
1942   BasicBlock *SCEVCheckBlock = nullptr;
1943 
1944   /// The value representing the result of the generated SCEV checks. If it is
1945   /// nullptr, either no SCEV checks have been generated or they have been used.
1946   Value *SCEVCheckCond = nullptr;
1947 
1948   /// Basic block which contains the generated memory runtime checks, if any.
1949   BasicBlock *MemCheckBlock = nullptr;
1950 
1951   /// The value representing the result of the generated memory runtime checks.
1952   /// If it is nullptr, either no memory runtime checks have been generated or
1953   /// they have been used.
1954   Value *MemRuntimeCheckCond = nullptr;
1955 
1956   DominatorTree *DT;
1957   LoopInfo *LI;
1958 
1959   SCEVExpander SCEVExp;
1960   SCEVExpander MemCheckExp;
1961 
1962 public:
1963   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1964                     const DataLayout &DL)
1965       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1966         MemCheckExp(SE, DL, "scev.check") {}
1967 
1968   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1969   /// accurately estimate the cost of the runtime checks. The blocks are
1970   /// un-linked from the IR and is added back during vector code generation. If
1971   /// there is no vector code generation, the check blocks are removed
1972   /// completely.
1973   void Create(Loop *L, const LoopAccessInfo &LAI,
1974               const SCEVPredicate &Pred) {
1975 
1976     BasicBlock *LoopHeader = L->getHeader();
1977     BasicBlock *Preheader = L->getLoopPreheader();
1978 
1979     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1980     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1981     // may be used by SCEVExpander. The blocks will be un-linked from their
1982     // predecessors and removed from LI & DT at the end of the function.
1983     if (!Pred.isAlwaysTrue()) {
1984       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1985                                   nullptr, "vector.scevcheck");
1986 
1987       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1988           &Pred, SCEVCheckBlock->getTerminator());
1989     }
1990 
1991     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1992     if (RtPtrChecking.Need) {
1993       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1994       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1995                                  "vector.memcheck");
1996 
1997       MemRuntimeCheckCond =
1998           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
1999                            RtPtrChecking.getChecks(), MemCheckExp);
2000       assert(MemRuntimeCheckCond &&
2001              "no RT checks generated although RtPtrChecking "
2002              "claimed checks are required");
2003     }
2004 
2005     if (!MemCheckBlock && !SCEVCheckBlock)
2006       return;
2007 
2008     // Unhook the temporary block with the checks, update various places
2009     // accordingly.
2010     if (SCEVCheckBlock)
2011       SCEVCheckBlock->replaceAllUsesWith(Preheader);
2012     if (MemCheckBlock)
2013       MemCheckBlock->replaceAllUsesWith(Preheader);
2014 
2015     if (SCEVCheckBlock) {
2016       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2017       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
2018       Preheader->getTerminator()->eraseFromParent();
2019     }
2020     if (MemCheckBlock) {
2021       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2022       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
2023       Preheader->getTerminator()->eraseFromParent();
2024     }
2025 
2026     DT->changeImmediateDominator(LoopHeader, Preheader);
2027     if (MemCheckBlock) {
2028       DT->eraseNode(MemCheckBlock);
2029       LI->removeBlock(MemCheckBlock);
2030     }
2031     if (SCEVCheckBlock) {
2032       DT->eraseNode(SCEVCheckBlock);
2033       LI->removeBlock(SCEVCheckBlock);
2034     }
2035   }
2036 
2037   /// Remove the created SCEV & memory runtime check blocks & instructions, if
2038   /// unused.
2039   ~GeneratedRTChecks() {
2040     SCEVExpanderCleaner SCEVCleaner(SCEVExp);
2041     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
2042     if (!SCEVCheckCond)
2043       SCEVCleaner.markResultUsed();
2044 
2045     if (!MemRuntimeCheckCond)
2046       MemCheckCleaner.markResultUsed();
2047 
2048     if (MemRuntimeCheckCond) {
2049       auto &SE = *MemCheckExp.getSE();
2050       // Memory runtime check generation creates compares that use expanded
2051       // values. Remove them before running the SCEVExpanderCleaners.
2052       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2053         if (MemCheckExp.isInsertedInstruction(&I))
2054           continue;
2055         SE.forgetValue(&I);
2056         I.eraseFromParent();
2057       }
2058     }
2059     MemCheckCleaner.cleanup();
2060     SCEVCleaner.cleanup();
2061 
2062     if (SCEVCheckCond)
2063       SCEVCheckBlock->eraseFromParent();
2064     if (MemRuntimeCheckCond)
2065       MemCheckBlock->eraseFromParent();
2066   }
2067 
2068   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2069   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2070   /// depending on the generated condition.
2071   BasicBlock *emitSCEVChecks(BasicBlock *Bypass,
2072                              BasicBlock *LoopVectorPreHeader,
2073                              BasicBlock *LoopExitBlock) {
2074     if (!SCEVCheckCond)
2075       return nullptr;
2076     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2077       if (C->isZero())
2078         return nullptr;
2079 
2080     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2081 
2082     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
2083     // Create new preheader for vector loop.
2084     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2085       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2086 
2087     SCEVCheckBlock->getTerminator()->eraseFromParent();
2088     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2089     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2090                                                 SCEVCheckBlock);
2091 
2092     DT->addNewBlock(SCEVCheckBlock, Pred);
2093     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2094 
2095     ReplaceInstWithInst(
2096         SCEVCheckBlock->getTerminator(),
2097         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2098     // Mark the check as used, to prevent it from being removed during cleanup.
2099     SCEVCheckCond = nullptr;
2100     return SCEVCheckBlock;
2101   }
2102 
2103   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2104   /// the branches to branch to the vector preheader or \p Bypass, depending on
2105   /// the generated condition.
2106   BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass,
2107                                    BasicBlock *LoopVectorPreHeader) {
2108     // Check if we generated code that checks in runtime if arrays overlap.
2109     if (!MemRuntimeCheckCond)
2110       return nullptr;
2111 
2112     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2113     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2114                                                 MemCheckBlock);
2115 
2116     DT->addNewBlock(MemCheckBlock, Pred);
2117     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2118     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2119 
2120     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2121       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2122 
2123     ReplaceInstWithInst(
2124         MemCheckBlock->getTerminator(),
2125         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2126     MemCheckBlock->getTerminator()->setDebugLoc(
2127         Pred->getTerminator()->getDebugLoc());
2128 
2129     // Mark the check as used, to prevent it from being removed during cleanup.
2130     MemRuntimeCheckCond = nullptr;
2131     return MemCheckBlock;
2132   }
2133 };
2134 
2135 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2136 // vectorization. The loop needs to be annotated with #pragma omp simd
2137 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
2138 // vector length information is not provided, vectorization is not considered
2139 // explicit. Interleave hints are not allowed either. These limitations will be
2140 // relaxed in the future.
2141 // Please, note that we are currently forced to abuse the pragma 'clang
2142 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2143 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2144 // provides *explicit vectorization hints* (LV can bypass legal checks and
2145 // assume that vectorization is legal). However, both hints are implemented
2146 // using the same metadata (llvm.loop.vectorize, processed by
2147 // LoopVectorizeHints). This will be fixed in the future when the native IR
2148 // representation for pragma 'omp simd' is introduced.
2149 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2150                                    OptimizationRemarkEmitter *ORE) {
2151   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2152   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2153 
2154   // Only outer loops with an explicit vectorization hint are supported.
2155   // Unannotated outer loops are ignored.
2156   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2157     return false;
2158 
2159   Function *Fn = OuterLp->getHeader()->getParent();
2160   if (!Hints.allowVectorization(Fn, OuterLp,
2161                                 true /*VectorizeOnlyWhenForced*/)) {
2162     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2163     return false;
2164   }
2165 
2166   if (Hints.getInterleave() > 1) {
2167     // TODO: Interleave support is future work.
2168     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2169                          "outer loops.\n");
2170     Hints.emitRemarkWithHints();
2171     return false;
2172   }
2173 
2174   return true;
2175 }
2176 
2177 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2178                                   OptimizationRemarkEmitter *ORE,
2179                                   SmallVectorImpl<Loop *> &V) {
2180   // Collect inner loops and outer loops without irreducible control flow. For
2181   // now, only collect outer loops that have explicit vectorization hints. If we
2182   // are stress testing the VPlan H-CFG construction, we collect the outermost
2183   // loop of every loop nest.
2184   if (L.isInnermost() || VPlanBuildStressTest ||
2185       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2186     LoopBlocksRPO RPOT(&L);
2187     RPOT.perform(LI);
2188     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2189       V.push_back(&L);
2190       // TODO: Collect inner loops inside marked outer loops in case
2191       // vectorization fails for the outer loop. Do not invoke
2192       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2193       // already known to be reducible. We can use an inherited attribute for
2194       // that.
2195       return;
2196     }
2197   }
2198   for (Loop *InnerL : L)
2199     collectSupportedLoops(*InnerL, LI, ORE, V);
2200 }
2201 
2202 namespace {
2203 
2204 /// The LoopVectorize Pass.
2205 struct LoopVectorize : public FunctionPass {
2206   /// Pass identification, replacement for typeid
2207   static char ID;
2208 
2209   LoopVectorizePass Impl;
2210 
2211   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2212                          bool VectorizeOnlyWhenForced = false)
2213       : FunctionPass(ID),
2214         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2215     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2216   }
2217 
2218   bool runOnFunction(Function &F) override {
2219     if (skipFunction(F))
2220       return false;
2221 
2222     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2223     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2224     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2225     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2226     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2227     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2228     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2229     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2230     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2231     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2232     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2233     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2234     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2235 
2236     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2237         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2238 
2239     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2240                         GetLAA, *ORE, PSI).MadeAnyChange;
2241   }
2242 
2243   void getAnalysisUsage(AnalysisUsage &AU) const override {
2244     AU.addRequired<AssumptionCacheTracker>();
2245     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2246     AU.addRequired<DominatorTreeWrapperPass>();
2247     AU.addRequired<LoopInfoWrapperPass>();
2248     AU.addRequired<ScalarEvolutionWrapperPass>();
2249     AU.addRequired<TargetTransformInfoWrapperPass>();
2250     AU.addRequired<AAResultsWrapperPass>();
2251     AU.addRequired<LoopAccessLegacyAnalysis>();
2252     AU.addRequired<DemandedBitsWrapperPass>();
2253     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2254     AU.addRequired<InjectTLIMappingsLegacy>();
2255 
2256     // We currently do not preserve loopinfo/dominator analyses with outer loop
2257     // vectorization. Until this is addressed, mark these analyses as preserved
2258     // only for non-VPlan-native path.
2259     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2260     if (!EnableVPlanNativePath) {
2261       AU.addPreserved<LoopInfoWrapperPass>();
2262       AU.addPreserved<DominatorTreeWrapperPass>();
2263     }
2264 
2265     AU.addPreserved<BasicAAWrapperPass>();
2266     AU.addPreserved<GlobalsAAWrapperPass>();
2267     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2268   }
2269 };
2270 
2271 } // end anonymous namespace
2272 
2273 //===----------------------------------------------------------------------===//
2274 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2275 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2276 //===----------------------------------------------------------------------===//
2277 
2278 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2279   // We need to place the broadcast of invariant variables outside the loop,
2280   // but only if it's proven safe to do so. Else, broadcast will be inside
2281   // vector loop body.
2282   Instruction *Instr = dyn_cast<Instruction>(V);
2283   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2284                      (!Instr ||
2285                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2286   // Place the code for broadcasting invariant variables in the new preheader.
2287   IRBuilder<>::InsertPointGuard Guard(Builder);
2288   if (SafeToHoist)
2289     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2290 
2291   // Broadcast the scalar into all locations in the vector.
2292   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2293 
2294   return Shuf;
2295 }
2296 
2297 /// This function adds
2298 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
2299 /// to each vector element of Val. The sequence starts at StartIndex.
2300 /// \p Opcode is relevant for FP induction variable.
2301 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
2302                             Instruction::BinaryOps BinOp, ElementCount VF,
2303                             IRBuilderBase &Builder) {
2304   assert(VF.isVector() && "only vector VFs are supported");
2305 
2306   // Create and check the types.
2307   auto *ValVTy = cast<VectorType>(Val->getType());
2308   ElementCount VLen = ValVTy->getElementCount();
2309 
2310   Type *STy = Val->getType()->getScalarType();
2311   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2312          "Induction Step must be an integer or FP");
2313   assert(Step->getType() == STy && "Step has wrong type");
2314 
2315   SmallVector<Constant *, 8> Indices;
2316 
2317   // Create a vector of consecutive numbers from zero to VF.
2318   VectorType *InitVecValVTy = ValVTy;
2319   if (STy->isFloatingPointTy()) {
2320     Type *InitVecValSTy =
2321         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2322     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2323   }
2324   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2325 
2326   // Splat the StartIdx
2327   Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
2328 
2329   if (STy->isIntegerTy()) {
2330     InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2331     Step = Builder.CreateVectorSplat(VLen, Step);
2332     assert(Step->getType() == Val->getType() && "Invalid step vec");
2333     // FIXME: The newly created binary instructions should contain nsw/nuw
2334     // flags, which can be found from the original scalar operations.
2335     Step = Builder.CreateMul(InitVec, Step);
2336     return Builder.CreateAdd(Val, Step, "induction");
2337   }
2338 
2339   // Floating point induction.
2340   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2341          "Binary Opcode should be specified for FP induction");
2342   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2343   InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
2344 
2345   Step = Builder.CreateVectorSplat(VLen, Step);
2346   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2347   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2348 }
2349 
2350 /// Compute scalar induction steps. \p ScalarIV is the scalar induction
2351 /// variable on which to base the steps, \p Step is the size of the step.
2352 static void buildScalarSteps(Value *ScalarIV, Value *Step,
2353                              const InductionDescriptor &ID, VPValue *Def,
2354                              VPTransformState &State) {
2355   IRBuilderBase &Builder = State.Builder;
2356   // We shouldn't have to build scalar steps if we aren't vectorizing.
2357   assert(State.VF.isVector() && "VF should be greater than one");
2358   // Get the value type and ensure it and the step have the same integer type.
2359   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2360   assert(ScalarIVTy == Step->getType() &&
2361          "Val and Step should have the same type");
2362 
2363   // We build scalar steps for both integer and floating-point induction
2364   // variables. Here, we determine the kind of arithmetic we will perform.
2365   Instruction::BinaryOps AddOp;
2366   Instruction::BinaryOps MulOp;
2367   if (ScalarIVTy->isIntegerTy()) {
2368     AddOp = Instruction::Add;
2369     MulOp = Instruction::Mul;
2370   } else {
2371     AddOp = ID.getInductionOpcode();
2372     MulOp = Instruction::FMul;
2373   }
2374 
2375   // Determine the number of scalars we need to generate for each unroll
2376   // iteration.
2377   bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def);
2378   unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
2379   // Compute the scalar steps and save the results in State.
2380   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2381                                      ScalarIVTy->getScalarSizeInBits());
2382   Type *VecIVTy = nullptr;
2383   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2384   if (!FirstLaneOnly && State.VF.isScalable()) {
2385     VecIVTy = VectorType::get(ScalarIVTy, State.VF);
2386     UnitStepVec =
2387         Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
2388     SplatStep = Builder.CreateVectorSplat(State.VF, Step);
2389     SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV);
2390   }
2391 
2392   for (unsigned Part = 0; Part < State.UF; ++Part) {
2393     Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);
2394 
2395     if (!FirstLaneOnly && State.VF.isScalable()) {
2396       auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
2397       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2398       if (ScalarIVTy->isFloatingPointTy())
2399         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2400       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2401       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2402       State.set(Def, Add, Part);
2403       // It's useful to record the lane values too for the known minimum number
2404       // of elements so we do those below. This improves the code quality when
2405       // trying to extract the first element, for example.
2406     }
2407 
2408     if (ScalarIVTy->isFloatingPointTy())
2409       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2410 
2411     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2412       Value *StartIdx = Builder.CreateBinOp(
2413           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2414       // The step returned by `createStepForVF` is a runtime-evaluated value
2415       // when VF is scalable. Otherwise, it should be folded into a Constant.
2416       assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
2417              "Expected StartIdx to be folded to a constant when VF is not "
2418              "scalable");
2419       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2420       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2421       State.set(Def, Add, VPIteration(Part, Lane));
2422     }
2423   }
2424 }
2425 
2426 // Generate code for the induction step. Note that induction steps are
2427 // required to be loop-invariant
2428 static Value *CreateStepValue(const SCEV *Step, ScalarEvolution &SE,
2429                               Instruction *InsertBefore,
2430                               Loop *OrigLoop = nullptr) {
2431   const DataLayout &DL = SE.getDataLayout();
2432   assert((!OrigLoop || SE.isLoopInvariant(Step, OrigLoop)) &&
2433          "Induction step should be loop invariant");
2434   if (auto *E = dyn_cast<SCEVUnknown>(Step))
2435     return E->getValue();
2436 
2437   SCEVExpander Exp(SE, DL, "induction");
2438   return Exp.expandCodeFor(Step, Step->getType(), InsertBefore);
2439 }
2440 
2441 /// Compute the transformed value of Index at offset StartValue using step
2442 /// StepValue.
2443 /// For integer induction, returns StartValue + Index * StepValue.
2444 /// For pointer induction, returns StartValue[Index * StepValue].
2445 /// FIXME: The newly created binary instructions should contain nsw/nuw
2446 /// flags, which can be found from the original scalar operations.
2447 static Value *emitTransformedIndex(IRBuilderBase &B, Value *Index,
2448                                    Value *StartValue, Value *Step,
2449                                    const InductionDescriptor &ID) {
2450   assert(Index->getType()->getScalarType() == Step->getType() &&
2451          "Index scalar type does not match StepValue type");
2452 
2453   // Note: the IR at this point is broken. We cannot use SE to create any new
2454   // SCEV and then expand it, hoping that SCEV's simplification will give us
2455   // a more optimal code. Unfortunately, attempt of doing so on invalid IR may
2456   // lead to various SCEV crashes. So all we can do is to use builder and rely
2457   // on InstCombine for future simplifications. Here we handle some trivial
2458   // cases only.
2459   auto CreateAdd = [&B](Value *X, Value *Y) {
2460     assert(X->getType() == Y->getType() && "Types don't match!");
2461     if (auto *CX = dyn_cast<ConstantInt>(X))
2462       if (CX->isZero())
2463         return Y;
2464     if (auto *CY = dyn_cast<ConstantInt>(Y))
2465       if (CY->isZero())
2466         return X;
2467     return B.CreateAdd(X, Y);
2468   };
2469 
2470   // We allow X to be a vector type, in which case Y will potentially be
2471   // splatted into a vector with the same element count.
2472   auto CreateMul = [&B](Value *X, Value *Y) {
2473     assert(X->getType()->getScalarType() == Y->getType() &&
2474            "Types don't match!");
2475     if (auto *CX = dyn_cast<ConstantInt>(X))
2476       if (CX->isOne())
2477         return Y;
2478     if (auto *CY = dyn_cast<ConstantInt>(Y))
2479       if (CY->isOne())
2480         return X;
2481     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
2482     if (XVTy && !isa<VectorType>(Y->getType()))
2483       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
2484     return B.CreateMul(X, Y);
2485   };
2486 
2487   switch (ID.getKind()) {
2488   case InductionDescriptor::IK_IntInduction: {
2489     assert(!isa<VectorType>(Index->getType()) &&
2490            "Vector indices not supported for integer inductions yet");
2491     assert(Index->getType() == StartValue->getType() &&
2492            "Index type does not match StartValue type");
2493     if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
2494       return B.CreateSub(StartValue, Index);
2495     auto *Offset = CreateMul(Index, Step);
2496     return CreateAdd(StartValue, Offset);
2497   }
2498   case InductionDescriptor::IK_PtrInduction: {
2499     assert(isa<Constant>(Step) &&
2500            "Expected constant step for pointer induction");
2501     return B.CreateGEP(ID.getElementType(), StartValue, CreateMul(Index, Step));
2502   }
2503   case InductionDescriptor::IK_FpInduction: {
2504     assert(!isa<VectorType>(Index->getType()) &&
2505            "Vector indices not supported for FP inductions yet");
2506     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2507     auto InductionBinOp = ID.getInductionBinOp();
2508     assert(InductionBinOp &&
2509            (InductionBinOp->getOpcode() == Instruction::FAdd ||
2510             InductionBinOp->getOpcode() == Instruction::FSub) &&
2511            "Original bin op should be defined for FP induction");
2512 
2513     Value *MulExp = B.CreateFMul(Step, Index);
2514     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2515                          "induction");
2516   }
2517   case InductionDescriptor::IK_NoInduction:
2518     return nullptr;
2519   }
2520   llvm_unreachable("invalid enum");
2521 }
2522 
2523 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2524                                                     const VPIteration &Instance,
2525                                                     VPTransformState &State) {
2526   Value *ScalarInst = State.get(Def, Instance);
2527   Value *VectorValue = State.get(Def, Instance.Part);
2528   VectorValue = Builder.CreateInsertElement(
2529       VectorValue, ScalarInst,
2530       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2531   State.set(Def, VectorValue, Instance.Part);
2532 }
2533 
2534 // Return whether we allow using masked interleave-groups (for dealing with
2535 // strided loads/stores that reside in predicated blocks, or for dealing
2536 // with gaps).
2537 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2538   // If an override option has been passed in for interleaved accesses, use it.
2539   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2540     return EnableMaskedInterleavedMemAccesses;
2541 
2542   return TTI.enableMaskedInterleavedAccessVectorization();
2543 }
2544 
2545 // Try to vectorize the interleave group that \p Instr belongs to.
2546 //
2547 // E.g. Translate following interleaved load group (factor = 3):
2548 //   for (i = 0; i < N; i+=3) {
2549 //     R = Pic[i];             // Member of index 0
2550 //     G = Pic[i+1];           // Member of index 1
2551 //     B = Pic[i+2];           // Member of index 2
2552 //     ... // do something to R, G, B
2553 //   }
2554 // To:
2555 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2556 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2557 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2558 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2559 //
2560 // Or translate following interleaved store group (factor = 3):
2561 //   for (i = 0; i < N; i+=3) {
2562 //     ... do something to R, G, B
2563 //     Pic[i]   = R;           // Member of index 0
2564 //     Pic[i+1] = G;           // Member of index 1
2565 //     Pic[i+2] = B;           // Member of index 2
2566 //   }
2567 // To:
2568 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2569 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2570 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2571 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2572 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2573 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2574     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2575     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2576     VPValue *BlockInMask) {
2577   Instruction *Instr = Group->getInsertPos();
2578   const DataLayout &DL = Instr->getModule()->getDataLayout();
2579 
2580   // Prepare for the vector type of the interleaved load/store.
2581   Type *ScalarTy = getLoadStoreType(Instr);
2582   unsigned InterleaveFactor = Group->getFactor();
2583   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2584   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2585 
2586   // Prepare for the new pointers.
2587   SmallVector<Value *, 2> AddrParts;
2588   unsigned Index = Group->getIndex(Instr);
2589 
2590   // TODO: extend the masked interleaved-group support to reversed access.
2591   assert((!BlockInMask || !Group->isReverse()) &&
2592          "Reversed masked interleave-group not supported.");
2593 
2594   // If the group is reverse, adjust the index to refer to the last vector lane
2595   // instead of the first. We adjust the index from the first vector lane,
2596   // rather than directly getting the pointer for lane VF - 1, because the
2597   // pointer operand of the interleaved access is supposed to be uniform. For
2598   // uniform instructions, we're only required to generate a value for the
2599   // first vector lane in each unroll iteration.
2600   if (Group->isReverse())
2601     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2602 
2603   for (unsigned Part = 0; Part < UF; Part++) {
2604     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2605     setDebugLocFromInst(AddrPart);
2606 
2607     // Notice current instruction could be any index. Need to adjust the address
2608     // to the member of index 0.
2609     //
2610     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2611     //       b = A[i];       // Member of index 0
2612     // Current pointer is pointed to A[i+1], adjust it to A[i].
2613     //
2614     // E.g.  A[i+1] = a;     // Member of index 1
2615     //       A[i]   = b;     // Member of index 0
2616     //       A[i+2] = c;     // Member of index 2 (Current instruction)
2617     // Current pointer is pointed to A[i+2], adjust it to A[i].
2618 
2619     bool InBounds = false;
2620     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2621       InBounds = gep->isInBounds();
2622     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2623     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2624 
2625     // Cast to the vector pointer type.
2626     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2627     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2628     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2629   }
2630 
2631   setDebugLocFromInst(Instr);
2632   Value *PoisonVec = PoisonValue::get(VecTy);
2633 
2634   Value *MaskForGaps = nullptr;
2635   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2636     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2637     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2638   }
2639 
2640   // Vectorize the interleaved load group.
2641   if (isa<LoadInst>(Instr)) {
2642     // For each unroll part, create a wide load for the group.
2643     SmallVector<Value *, 2> NewLoads;
2644     for (unsigned Part = 0; Part < UF; Part++) {
2645       Instruction *NewLoad;
2646       if (BlockInMask || MaskForGaps) {
2647         assert(useMaskedInterleavedAccesses(*TTI) &&
2648                "masked interleaved groups are not allowed.");
2649         Value *GroupMask = MaskForGaps;
2650         if (BlockInMask) {
2651           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2652           Value *ShuffledMask = Builder.CreateShuffleVector(
2653               BlockInMaskPart,
2654               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2655               "interleaved.mask");
2656           GroupMask = MaskForGaps
2657                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2658                                                 MaskForGaps)
2659                           : ShuffledMask;
2660         }
2661         NewLoad =
2662             Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
2663                                      GroupMask, PoisonVec, "wide.masked.vec");
2664       }
2665       else
2666         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2667                                             Group->getAlign(), "wide.vec");
2668       Group->addMetadata(NewLoad);
2669       NewLoads.push_back(NewLoad);
2670     }
2671 
2672     // For each member in the group, shuffle out the appropriate data from the
2673     // wide loads.
2674     unsigned J = 0;
2675     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2676       Instruction *Member = Group->getMember(I);
2677 
2678       // Skip the gaps in the group.
2679       if (!Member)
2680         continue;
2681 
2682       auto StrideMask =
2683           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2684       for (unsigned Part = 0; Part < UF; Part++) {
2685         Value *StridedVec = Builder.CreateShuffleVector(
2686             NewLoads[Part], StrideMask, "strided.vec");
2687 
2688         // If this member has different type, cast the result type.
2689         if (Member->getType() != ScalarTy) {
2690           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2691           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2692           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2693         }
2694 
2695         if (Group->isReverse())
2696           StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");
2697 
2698         State.set(VPDefs[J], StridedVec, Part);
2699       }
2700       ++J;
2701     }
2702     return;
2703   }
2704 
2705   // The sub vector type for current instruction.
2706   auto *SubVT = VectorType::get(ScalarTy, VF);
2707 
2708   // Vectorize the interleaved store group.
2709   MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2710   assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
2711          "masked interleaved groups are not allowed.");
2712   assert((!MaskForGaps || !VF.isScalable()) &&
2713          "masking gaps for scalable vectors is not yet supported.");
2714   for (unsigned Part = 0; Part < UF; Part++) {
2715     // Collect the stored vector from each member.
2716     SmallVector<Value *, 4> StoredVecs;
2717     for (unsigned i = 0; i < InterleaveFactor; i++) {
2718       assert((Group->getMember(i) || MaskForGaps) &&
2719              "Fail to get a member from an interleaved store group");
2720       Instruction *Member = Group->getMember(i);
2721 
2722       // Skip the gaps in the group.
2723       if (!Member) {
2724         Value *Undef = PoisonValue::get(SubVT);
2725         StoredVecs.push_back(Undef);
2726         continue;
2727       }
2728 
2729       Value *StoredVec = State.get(StoredValues[i], Part);
2730 
2731       if (Group->isReverse())
2732         StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse");
2733 
2734       // If this member has different type, cast it to a unified type.
2735 
2736       if (StoredVec->getType() != SubVT)
2737         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2738 
2739       StoredVecs.push_back(StoredVec);
2740     }
2741 
2742     // Concatenate all vectors into a wide vector.
2743     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2744 
2745     // Interleave the elements in the wide vector.
2746     Value *IVec = Builder.CreateShuffleVector(
2747         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2748         "interleaved.vec");
2749 
2750     Instruction *NewStoreInstr;
2751     if (BlockInMask || MaskForGaps) {
2752       Value *GroupMask = MaskForGaps;
2753       if (BlockInMask) {
2754         Value *BlockInMaskPart = State.get(BlockInMask, Part);
2755         Value *ShuffledMask = Builder.CreateShuffleVector(
2756             BlockInMaskPart,
2757             createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2758             "interleaved.mask");
2759         GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
2760                                                       ShuffledMask, MaskForGaps)
2761                                 : ShuffledMask;
2762       }
2763       NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
2764                                                 Group->getAlign(), GroupMask);
2765     } else
2766       NewStoreInstr =
2767           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2768 
2769     Group->addMetadata(NewStoreInstr);
2770   }
2771 }
2772 
2773 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2774                                                VPReplicateRecipe *RepRecipe,
2775                                                const VPIteration &Instance,
2776                                                bool IfPredicateInstr,
2777                                                VPTransformState &State) {
2778   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2779 
2780   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
2781   // the first lane and part.
2782   if (isa<NoAliasScopeDeclInst>(Instr))
2783     if (!Instance.isFirstIteration())
2784       return;
2785 
2786   setDebugLocFromInst(Instr);
2787 
2788   // Does this instruction return a value ?
2789   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2790 
2791   Instruction *Cloned = Instr->clone();
2792   if (!IsVoidRetTy)
2793     Cloned->setName(Instr->getName() + ".cloned");
2794 
2795   // If the scalarized instruction contributes to the address computation of a
2796   // widen masked load/store which was in a basic block that needed predication
2797   // and is not predicated after vectorization, we can't propagate
2798   // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized
2799   // instruction could feed a poison value to the base address of the widen
2800   // load/store.
2801   if (State.MayGeneratePoisonRecipes.contains(RepRecipe))
2802     Cloned->dropPoisonGeneratingFlags();
2803 
2804   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
2805                                Builder.GetInsertPoint());
2806   // Replace the operands of the cloned instructions with their scalar
2807   // equivalents in the new loop.
2808   for (auto &I : enumerate(RepRecipe->operands())) {
2809     auto InputInstance = Instance;
2810     VPValue *Operand = I.value();
2811     VPReplicateRecipe *OperandR = dyn_cast<VPReplicateRecipe>(Operand);
2812     if (OperandR && OperandR->isUniform())
2813       InputInstance.Lane = VPLane::getFirstLane();
2814     Cloned->setOperand(I.index(), State.get(Operand, InputInstance));
2815   }
2816   addNewMetadata(Cloned, Instr);
2817 
2818   // Place the cloned scalar in the new loop.
2819   Builder.Insert(Cloned);
2820 
2821   State.set(RepRecipe, Cloned, Instance);
2822 
2823   // If we just cloned a new assumption, add it the assumption cache.
2824   if (auto *II = dyn_cast<AssumeInst>(Cloned))
2825     AC->registerAssumption(II);
2826 
2827   // End if-block.
2828   if (IfPredicateInstr)
2829     PredicatedInstructions.push_back(Cloned);
2830 }
2831 
2832 Value *InnerLoopVectorizer::getOrCreateTripCount(BasicBlock *InsertBlock) {
2833   if (TripCount)
2834     return TripCount;
2835 
2836   assert(InsertBlock);
2837   IRBuilder<> Builder(InsertBlock->getTerminator());
2838   // Find the loop boundaries.
2839   ScalarEvolution *SE = PSE.getSE();
2840   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2841   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
2842          "Invalid loop count");
2843 
2844   Type *IdxTy = Legal->getWidestInductionType();
2845   assert(IdxTy && "No type for induction");
2846 
2847   // The exit count might have the type of i64 while the phi is i32. This can
2848   // happen if we have an induction variable that is sign extended before the
2849   // compare. The only way that we get a backedge taken count is that the
2850   // induction variable was signed and as such will not overflow. In such a case
2851   // truncation is legal.
2852   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
2853       IdxTy->getPrimitiveSizeInBits())
2854     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2855   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2856 
2857   // Get the total trip count from the count by adding 1.
2858   const SCEV *ExitCount = SE->getAddExpr(
2859       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2860 
2861   const DataLayout &DL = InsertBlock->getModule()->getDataLayout();
2862 
2863   // Expand the trip count and place the new instructions in the preheader.
2864   // Notice that the pre-header does not change, only the loop body.
2865   SCEVExpander Exp(*SE, DL, "induction");
2866 
2867   // Count holds the overall loop count (N).
2868   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2869                                 InsertBlock->getTerminator());
2870 
2871   if (TripCount->getType()->isPointerTy())
2872     TripCount =
2873         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2874                                     InsertBlock->getTerminator());
2875 
2876   return TripCount;
2877 }
2878 
2879 Value *
2880 InnerLoopVectorizer::getOrCreateVectorTripCount(BasicBlock *InsertBlock) {
2881   if (VectorTripCount)
2882     return VectorTripCount;
2883 
2884   Value *TC = getOrCreateTripCount(InsertBlock);
2885   IRBuilder<> Builder(InsertBlock->getTerminator());
2886 
2887   Type *Ty = TC->getType();
2888   // This is where we can make the step a runtime constant.
2889   Value *Step = createStepForVF(Builder, Ty, VF, UF);
2890 
2891   // If the tail is to be folded by masking, round the number of iterations N
2892   // up to a multiple of Step instead of rounding down. This is done by first
2893   // adding Step-1 and then rounding down. Note that it's ok if this addition
2894   // overflows: the vector induction variable will eventually wrap to zero given
2895   // that it starts at zero and its Step is a power of two; the loop will then
2896   // exit, with the last early-exit vector comparison also producing all-true.
2897   if (Cost->foldTailByMasking()) {
2898     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
2899            "VF*UF must be a power of 2 when folding tail by masking");
2900     Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF);
2901     TC = Builder.CreateAdd(
2902         TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up");
2903   }
2904 
2905   // Now we need to generate the expression for the part of the loop that the
2906   // vectorized body will execute. This is equal to N - (N % Step) if scalar
2907   // iterations are not required for correctness, or N - Step, otherwise. Step
2908   // is equal to the vectorization factor (number of SIMD elements) times the
2909   // unroll factor (number of SIMD instructions).
2910   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2911 
2912   // There are cases where we *must* run at least one iteration in the remainder
2913   // loop.  See the cost model for when this can happen.  If the step evenly
2914   // divides the trip count, we set the remainder to be equal to the step. If
2915   // the step does not evenly divide the trip count, no adjustment is necessary
2916   // since there will already be scalar iterations. Note that the minimum
2917   // iterations check ensures that N >= Step.
2918   if (Cost->requiresScalarEpilogue(VF)) {
2919     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2920     R = Builder.CreateSelect(IsZero, Step, R);
2921   }
2922 
2923   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2924 
2925   return VectorTripCount;
2926 }
2927 
2928 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2929                                                    const DataLayout &DL) {
2930   // Verify that V is a vector type with same number of elements as DstVTy.
2931   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
2932   unsigned VF = DstFVTy->getNumElements();
2933   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
2934   assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
2935   Type *SrcElemTy = SrcVecTy->getElementType();
2936   Type *DstElemTy = DstFVTy->getElementType();
2937   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2938          "Vector elements must have same size");
2939 
2940   // Do a direct cast if element types are castable.
2941   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2942     return Builder.CreateBitOrPointerCast(V, DstFVTy);
2943   }
2944   // V cannot be directly casted to desired vector type.
2945   // May happen when V is a floating point vector but DstVTy is a vector of
2946   // pointers or vice-versa. Handle this using a two-step bitcast using an
2947   // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
2948   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2949          "Only one type should be a pointer type");
2950   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2951          "Only one type should be a floating point type");
2952   Type *IntTy =
2953       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2954   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
2955   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2956   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
2957 }
2958 
2959 void InnerLoopVectorizer::emitMinimumIterationCountCheck(BasicBlock *Bypass) {
2960   Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
2961   // Reuse existing vector loop preheader for TC checks.
2962   // Note that new preheader block is generated for vector loop.
2963   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
2964   IRBuilder<> Builder(TCCheckBlock->getTerminator());
2965 
2966   // Generate code to check if the loop's trip count is less than VF * UF, or
2967   // equal to it in case a scalar epilogue is required; this implies that the
2968   // vector trip count is zero. This check also covers the case where adding one
2969   // to the backedge-taken count overflowed leading to an incorrect trip count
2970   // of zero. In this case we will also jump to the scalar loop.
2971   auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
2972                                             : ICmpInst::ICMP_ULT;
2973 
2974   // If tail is to be folded, vector loop takes care of all iterations.
2975   Value *CheckMinIters = Builder.getFalse();
2976   if (!Cost->foldTailByMasking()) {
2977     Value *Step = createStepForVF(Builder, Count->getType(), VF, UF);
2978     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
2979   }
2980   // Create new preheader for vector loop.
2981   LoopVectorPreHeader =
2982       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
2983                  "vector.ph");
2984 
2985   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
2986                                DT->getNode(Bypass)->getIDom()) &&
2987          "TC check is expected to dominate Bypass");
2988 
2989   // Update dominator for Bypass & LoopExit (if needed).
2990   DT->changeImmediateDominator(Bypass, TCCheckBlock);
2991   if (!Cost->requiresScalarEpilogue(VF))
2992     // If there is an epilogue which must run, there's no edge from the
2993     // middle block to exit blocks  and thus no need to update the immediate
2994     // dominator of the exit blocks.
2995     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
2996 
2997   ReplaceInstWithInst(
2998       TCCheckBlock->getTerminator(),
2999       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3000   LoopBypassBlocks.push_back(TCCheckBlock);
3001 }
3002 
3003 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) {
3004 
3005   BasicBlock *const SCEVCheckBlock =
3006       RTChecks.emitSCEVChecks(Bypass, LoopVectorPreHeader, LoopExitBlock);
3007   if (!SCEVCheckBlock)
3008     return nullptr;
3009 
3010   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3011            (OptForSizeBasedOnProfile &&
3012             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3013          "Cannot SCEV check stride or overflow when optimizing for size");
3014 
3015 
3016   // Update dominator only if this is first RT check.
3017   if (LoopBypassBlocks.empty()) {
3018     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3019     if (!Cost->requiresScalarEpilogue(VF))
3020       // If there is an epilogue which must run, there's no edge from the
3021       // middle block to exit blocks  and thus no need to update the immediate
3022       // dominator of the exit blocks.
3023       DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3024   }
3025 
3026   LoopBypassBlocks.push_back(SCEVCheckBlock);
3027   AddedSafetyChecks = true;
3028   return SCEVCheckBlock;
3029 }
3030 
3031 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(BasicBlock *Bypass) {
3032   // VPlan-native path does not do any analysis for runtime checks currently.
3033   if (EnableVPlanNativePath)
3034     return nullptr;
3035 
3036   BasicBlock *const MemCheckBlock =
3037       RTChecks.emitMemRuntimeChecks(Bypass, LoopVectorPreHeader);
3038 
3039   // Check if we generated code that checks in runtime if arrays overlap. We put
3040   // the checks into a separate block to make the more common case of few
3041   // elements faster.
3042   if (!MemCheckBlock)
3043     return nullptr;
3044 
3045   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3046     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3047            "Cannot emit memory checks when optimizing for size, unless forced "
3048            "to vectorize.");
3049     ORE->emit([&]() {
3050       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3051                                         OrigLoop->getStartLoc(),
3052                                         OrigLoop->getHeader())
3053              << "Code-size may be reduced by not forcing "
3054                 "vectorization, or by source-code modifications "
3055                 "eliminating the need for runtime checks "
3056                 "(e.g., adding 'restrict').";
3057     });
3058   }
3059 
3060   LoopBypassBlocks.push_back(MemCheckBlock);
3061 
3062   AddedSafetyChecks = true;
3063 
3064   // We currently don't use LoopVersioning for the actual loop cloning but we
3065   // still use it to add the noalias metadata.
3066   LVer = std::make_unique<LoopVersioning>(
3067       *Legal->getLAI(),
3068       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3069       DT, PSE.getSE());
3070   LVer->prepareNoAliasMetadata();
3071   return MemCheckBlock;
3072 }
3073 
3074 void InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3075   LoopScalarBody = OrigLoop->getHeader();
3076   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3077   assert(LoopVectorPreHeader && "Invalid loop structure");
3078   LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
3079   assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
3080          "multiple exit loop without required epilogue?");
3081 
3082   LoopMiddleBlock =
3083       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3084                  LI, nullptr, Twine(Prefix) + "middle.block");
3085   LoopScalarPreHeader =
3086       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3087                  nullptr, Twine(Prefix) + "scalar.ph");
3088 
3089   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3090 
3091   // Set up the middle block terminator.  Two cases:
3092   // 1) If we know that we must execute the scalar epilogue, emit an
3093   //    unconditional branch.
3094   // 2) Otherwise, we must have a single unique exit block (due to how we
3095   //    implement the multiple exit case).  In this case, set up a conditonal
3096   //    branch from the middle block to the loop scalar preheader, and the
3097   //    exit block.  completeLoopSkeleton will update the condition to use an
3098   //    iteration check, if required to decide whether to execute the remainder.
3099   BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3100     BranchInst::Create(LoopScalarPreHeader) :
3101     BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
3102                        Builder.getTrue());
3103   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3104   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3105 
3106   SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3107              nullptr, nullptr, Twine(Prefix) + "vector.body");
3108 
3109   // Update dominator for loop exit.
3110   if (!Cost->requiresScalarEpilogue(VF))
3111     // If there is an epilogue which must run, there's no edge from the
3112     // middle block to exit blocks  and thus no need to update the immediate
3113     // dominator of the exit blocks.
3114     DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3115 }
3116 
3117 void InnerLoopVectorizer::createInductionResumeValues(
3118     std::pair<BasicBlock *, Value *> AdditionalBypass) {
3119   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3120           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3121          "Inconsistent information about additional bypass.");
3122 
3123   Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader);
3124   assert(VectorTripCount && "Expected valid arguments");
3125   // We are going to resume the execution of the scalar loop.
3126   // Go over all of the induction variables that we found and fix the
3127   // PHIs that are left in the scalar version of the loop.
3128   // The starting values of PHI nodes depend on the counter of the last
3129   // iteration in the vectorized loop.
3130   // If we come from a bypass edge then we need to start from the original
3131   // start value.
3132   Instruction *OldInduction = Legal->getPrimaryInduction();
3133   for (auto &InductionEntry : Legal->getInductionVars()) {
3134     PHINode *OrigPhi = InductionEntry.first;
3135     InductionDescriptor II = InductionEntry.second;
3136 
3137     // Create phi nodes to merge from the  backedge-taken check block.
3138     PHINode *BCResumeVal =
3139         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3140                         LoopScalarPreHeader->getTerminator());
3141     // Copy original phi DL over to the new one.
3142     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3143     Value *&EndValue = IVEndValues[OrigPhi];
3144     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3145     if (OrigPhi == OldInduction) {
3146       // We know what the end value is.
3147       EndValue = VectorTripCount;
3148     } else {
3149       IRBuilder<> B(LoopVectorPreHeader->getTerminator());
3150 
3151       // Fast-math-flags propagate from the original induction instruction.
3152       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3153         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3154 
3155       Type *StepType = II.getStep()->getType();
3156       Instruction::CastOps CastOp =
3157           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3158       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3159       Value *Step =
3160           CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
3161       EndValue = emitTransformedIndex(B, CRD, II.getStartValue(), Step, II);
3162       EndValue->setName("ind.end");
3163 
3164       // Compute the end value for the additional bypass (if applicable).
3165       if (AdditionalBypass.first) {
3166         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3167         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3168                                          StepType, true);
3169         Value *Step =
3170             CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
3171         CRD =
3172             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3173         EndValueFromAdditionalBypass =
3174             emitTransformedIndex(B, CRD, II.getStartValue(), Step, II);
3175         EndValueFromAdditionalBypass->setName("ind.end");
3176       }
3177     }
3178     // The new PHI merges the original incoming value, in case of a bypass,
3179     // or the value at the end of the vectorized loop.
3180     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3181 
3182     // Fix the scalar body counter (PHI node).
3183     // The old induction's phi node in the scalar body needs the truncated
3184     // value.
3185     for (BasicBlock *BB : LoopBypassBlocks)
3186       BCResumeVal->addIncoming(II.getStartValue(), BB);
3187 
3188     if (AdditionalBypass.first)
3189       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3190                                             EndValueFromAdditionalBypass);
3191 
3192     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3193   }
3194 }
3195 
3196 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(MDNode *OrigLoopID) {
3197   // The trip counts should be cached by now.
3198   Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
3199   Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader);
3200 
3201   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3202 
3203   // Add a check in the middle block to see if we have completed
3204   // all of the iterations in the first vector loop.  Three cases:
3205   // 1) If we require a scalar epilogue, there is no conditional branch as
3206   //    we unconditionally branch to the scalar preheader.  Do nothing.
3207   // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
3208   //    Thus if tail is to be folded, we know we don't need to run the
3209   //    remainder and we can use the previous value for the condition (true).
3210   // 3) Otherwise, construct a runtime check.
3211   if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
3212     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3213                                         Count, VectorTripCount, "cmp.n",
3214                                         LoopMiddleBlock->getTerminator());
3215 
3216     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3217     // of the corresponding compare because they may have ended up with
3218     // different line numbers and we want to avoid awkward line stepping while
3219     // debugging. Eg. if the compare has got a line number inside the loop.
3220     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3221     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3222   }
3223 
3224 #ifdef EXPENSIVE_CHECKS
3225   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3226 #endif
3227 
3228   return LoopVectorPreHeader;
3229 }
3230 
3231 std::pair<BasicBlock *, Value *>
3232 InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3233   /*
3234    In this function we generate a new loop. The new loop will contain
3235    the vectorized instructions while the old loop will continue to run the
3236    scalar remainder.
3237 
3238        [ ] <-- loop iteration number check.
3239     /   |
3240    /    v
3241   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3242   |  /  |
3243   | /   v
3244   ||   [ ]     <-- vector pre header.
3245   |/    |
3246   |     v
3247   |    [  ] \
3248   |    [  ]_|   <-- vector loop.
3249   |     |
3250   |     v
3251   \   -[ ]   <--- middle-block.
3252    \/   |
3253    /\   v
3254    | ->[ ]     <--- new preheader.
3255    |    |
3256  (opt)  v      <-- edge from middle to exit iff epilogue is not required.
3257    |   [ ] \
3258    |   [ ]_|   <-- old scalar loop to handle remainder (scalar epilogue).
3259     \   |
3260      \  v
3261       >[ ]     <-- exit block(s).
3262    ...
3263    */
3264 
3265   // Get the metadata of the original loop before it gets modified.
3266   MDNode *OrigLoopID = OrigLoop->getLoopID();
3267 
3268   // Workaround!  Compute the trip count of the original loop and cache it
3269   // before we start modifying the CFG.  This code has a systemic problem
3270   // wherein it tries to run analysis over partially constructed IR; this is
3271   // wrong, and not simply for SCEV.  The trip count of the original loop
3272   // simply happens to be prone to hitting this in practice.  In theory, we
3273   // can hit the same issue for any SCEV, or ValueTracking query done during
3274   // mutation.  See PR49900.
3275   getOrCreateTripCount(OrigLoop->getLoopPreheader());
3276 
3277   // Create an empty vector loop, and prepare basic blocks for the runtime
3278   // checks.
3279   createVectorLoopSkeleton("");
3280 
3281   // Now, compare the new count to zero. If it is zero skip the vector loop and
3282   // jump to the scalar loop. This check also covers the case where the
3283   // backedge-taken count is uint##_max: adding one to it will overflow leading
3284   // to an incorrect trip count of zero. In this (rare) case we will also jump
3285   // to the scalar loop.
3286   emitMinimumIterationCountCheck(LoopScalarPreHeader);
3287 
3288   // Generate the code to check any assumptions that we've made for SCEV
3289   // expressions.
3290   emitSCEVChecks(LoopScalarPreHeader);
3291 
3292   // Generate the code that checks in runtime if arrays overlap. We put the
3293   // checks into a separate block to make the more common case of few elements
3294   // faster.
3295   emitMemRuntimeChecks(LoopScalarPreHeader);
3296 
3297   // Emit phis for the new starting index of the scalar loop.
3298   createInductionResumeValues();
3299 
3300   return {completeLoopSkeleton(OrigLoopID), nullptr};
3301 }
3302 
3303 // Fix up external users of the induction variable. At this point, we are
3304 // in LCSSA form, with all external PHIs that use the IV having one input value,
3305 // coming from the remainder loop. We need those PHIs to also have a correct
3306 // value for the IV when arriving directly from the middle block.
3307 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3308                                        const InductionDescriptor &II,
3309                                        Value *CountRoundDown, Value *EndValue,
3310                                        BasicBlock *MiddleBlock,
3311                                        BasicBlock *VectorHeader) {
3312   // There are two kinds of external IV usages - those that use the value
3313   // computed in the last iteration (the PHI) and those that use the penultimate
3314   // value (the value that feeds into the phi from the loop latch).
3315   // We allow both, but they, obviously, have different values.
3316 
3317   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3318 
3319   DenseMap<Value *, Value *> MissingVals;
3320 
3321   // An external user of the last iteration's value should see the value that
3322   // the remainder loop uses to initialize its own IV.
3323   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3324   for (User *U : PostInc->users()) {
3325     Instruction *UI = cast<Instruction>(U);
3326     if (!OrigLoop->contains(UI)) {
3327       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3328       MissingVals[UI] = EndValue;
3329     }
3330   }
3331 
3332   // An external user of the penultimate value need to see EndValue - Step.
3333   // The simplest way to get this is to recompute it from the constituent SCEVs,
3334   // that is Start + (Step * (CRD - 1)).
3335   for (User *U : OrigPhi->users()) {
3336     auto *UI = cast<Instruction>(U);
3337     if (!OrigLoop->contains(UI)) {
3338       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3339 
3340       IRBuilder<> B(MiddleBlock->getTerminator());
3341 
3342       // Fast-math-flags propagate from the original induction instruction.
3343       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3344         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3345 
3346       Value *CountMinusOne = B.CreateSub(
3347           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3348       Value *CMO =
3349           !II.getStep()->getType()->isIntegerTy()
3350               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3351                              II.getStep()->getType())
3352               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3353       CMO->setName("cast.cmo");
3354 
3355       Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(),
3356                                     VectorHeader->getTerminator());
3357       Value *Escape =
3358           emitTransformedIndex(B, CMO, II.getStartValue(), Step, II);
3359       Escape->setName("ind.escape");
3360       MissingVals[UI] = Escape;
3361     }
3362   }
3363 
3364   for (auto &I : MissingVals) {
3365     PHINode *PHI = cast<PHINode>(I.first);
3366     // One corner case we have to handle is two IVs "chasing" each-other,
3367     // that is %IV2 = phi [...], [ %IV1, %latch ]
3368     // In this case, if IV1 has an external use, we need to avoid adding both
3369     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3370     // don't already have an incoming value for the middle block.
3371     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3372       PHI->addIncoming(I.second, MiddleBlock);
3373   }
3374 }
3375 
3376 namespace {
3377 
3378 struct CSEDenseMapInfo {
3379   static bool canHandle(const Instruction *I) {
3380     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3381            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3382   }
3383 
3384   static inline Instruction *getEmptyKey() {
3385     return DenseMapInfo<Instruction *>::getEmptyKey();
3386   }
3387 
3388   static inline Instruction *getTombstoneKey() {
3389     return DenseMapInfo<Instruction *>::getTombstoneKey();
3390   }
3391 
3392   static unsigned getHashValue(const Instruction *I) {
3393     assert(canHandle(I) && "Unknown instruction!");
3394     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3395                                                            I->value_op_end()));
3396   }
3397 
3398   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3399     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3400         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3401       return LHS == RHS;
3402     return LHS->isIdenticalTo(RHS);
3403   }
3404 };
3405 
3406 } // end anonymous namespace
3407 
3408 ///Perform cse of induction variable instructions.
3409 static void cse(BasicBlock *BB) {
3410   // Perform simple cse.
3411   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3412   for (Instruction &In : llvm::make_early_inc_range(*BB)) {
3413     if (!CSEDenseMapInfo::canHandle(&In))
3414       continue;
3415 
3416     // Check if we can replace this instruction with any of the
3417     // visited instructions.
3418     if (Instruction *V = CSEMap.lookup(&In)) {
3419       In.replaceAllUsesWith(V);
3420       In.eraseFromParent();
3421       continue;
3422     }
3423 
3424     CSEMap[&In] = &In;
3425   }
3426 }
3427 
3428 InstructionCost
3429 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3430                                               bool &NeedToScalarize) const {
3431   Function *F = CI->getCalledFunction();
3432   Type *ScalarRetTy = CI->getType();
3433   SmallVector<Type *, 4> Tys, ScalarTys;
3434   for (auto &ArgOp : CI->args())
3435     ScalarTys.push_back(ArgOp->getType());
3436 
3437   // Estimate cost of scalarized vector call. The source operands are assumed
3438   // to be vectors, so we need to extract individual elements from there,
3439   // execute VF scalar calls, and then gather the result into the vector return
3440   // value.
3441   InstructionCost ScalarCallCost =
3442       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3443   if (VF.isScalar())
3444     return ScalarCallCost;
3445 
3446   // Compute corresponding vector type for return value and arguments.
3447   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3448   for (Type *ScalarTy : ScalarTys)
3449     Tys.push_back(ToVectorTy(ScalarTy, VF));
3450 
3451   // Compute costs of unpacking argument values for the scalar calls and
3452   // packing the return values to a vector.
3453   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3454 
3455   InstructionCost Cost =
3456       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3457 
3458   // If we can't emit a vector call for this function, then the currently found
3459   // cost is the cost we need to return.
3460   NeedToScalarize = true;
3461   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3462   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3463 
3464   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3465     return Cost;
3466 
3467   // If the corresponding vector cost is cheaper, return its cost.
3468   InstructionCost VectorCallCost =
3469       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3470   if (VectorCallCost < Cost) {
3471     NeedToScalarize = false;
3472     Cost = VectorCallCost;
3473   }
3474   return Cost;
3475 }
3476 
3477 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3478   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3479     return Elt;
3480   return VectorType::get(Elt, VF);
3481 }
3482 
3483 InstructionCost
3484 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3485                                                    ElementCount VF) const {
3486   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3487   assert(ID && "Expected intrinsic call!");
3488   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3489   FastMathFlags FMF;
3490   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3491     FMF = FPMO->getFastMathFlags();
3492 
3493   SmallVector<const Value *> Arguments(CI->args());
3494   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3495   SmallVector<Type *> ParamTys;
3496   std::transform(FTy->param_begin(), FTy->param_end(),
3497                  std::back_inserter(ParamTys),
3498                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3499 
3500   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3501                                     dyn_cast<IntrinsicInst>(CI));
3502   return TTI.getIntrinsicInstrCost(CostAttrs,
3503                                    TargetTransformInfo::TCK_RecipThroughput);
3504 }
3505 
3506 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3507   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3508   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3509   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3510 }
3511 
3512 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3513   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3514   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3515   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3516 }
3517 
3518 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3519   // For every instruction `I` in MinBWs, truncate the operands, create a
3520   // truncated version of `I` and reextend its result. InstCombine runs
3521   // later and will remove any ext/trunc pairs.
3522   SmallPtrSet<Value *, 4> Erased;
3523   for (const auto &KV : Cost->getMinimalBitwidths()) {
3524     // If the value wasn't vectorized, we must maintain the original scalar
3525     // type. The absence of the value from State indicates that it
3526     // wasn't vectorized.
3527     // FIXME: Should not rely on getVPValue at this point.
3528     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3529     if (!State.hasAnyVectorValue(Def))
3530       continue;
3531     for (unsigned Part = 0; Part < UF; ++Part) {
3532       Value *I = State.get(Def, Part);
3533       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3534         continue;
3535       Type *OriginalTy = I->getType();
3536       Type *ScalarTruncatedTy =
3537           IntegerType::get(OriginalTy->getContext(), KV.second);
3538       auto *TruncatedTy = VectorType::get(
3539           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
3540       if (TruncatedTy == OriginalTy)
3541         continue;
3542 
3543       IRBuilder<> B(cast<Instruction>(I));
3544       auto ShrinkOperand = [&](Value *V) -> Value * {
3545         if (auto *ZI = dyn_cast<ZExtInst>(V))
3546           if (ZI->getSrcTy() == TruncatedTy)
3547             return ZI->getOperand(0);
3548         return B.CreateZExtOrTrunc(V, TruncatedTy);
3549       };
3550 
3551       // The actual instruction modification depends on the instruction type,
3552       // unfortunately.
3553       Value *NewI = nullptr;
3554       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3555         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3556                              ShrinkOperand(BO->getOperand(1)));
3557 
3558         // Any wrapping introduced by shrinking this operation shouldn't be
3559         // considered undefined behavior. So, we can't unconditionally copy
3560         // arithmetic wrapping flags to NewI.
3561         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3562       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3563         NewI =
3564             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3565                          ShrinkOperand(CI->getOperand(1)));
3566       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3567         NewI = B.CreateSelect(SI->getCondition(),
3568                               ShrinkOperand(SI->getTrueValue()),
3569                               ShrinkOperand(SI->getFalseValue()));
3570       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3571         switch (CI->getOpcode()) {
3572         default:
3573           llvm_unreachable("Unhandled cast!");
3574         case Instruction::Trunc:
3575           NewI = ShrinkOperand(CI->getOperand(0));
3576           break;
3577         case Instruction::SExt:
3578           NewI = B.CreateSExtOrTrunc(
3579               CI->getOperand(0),
3580               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3581           break;
3582         case Instruction::ZExt:
3583           NewI = B.CreateZExtOrTrunc(
3584               CI->getOperand(0),
3585               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3586           break;
3587         }
3588       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3589         auto Elements0 =
3590             cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
3591         auto *O0 = B.CreateZExtOrTrunc(
3592             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3593         auto Elements1 =
3594             cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
3595         auto *O1 = B.CreateZExtOrTrunc(
3596             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3597 
3598         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3599       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3600         // Don't do anything with the operands, just extend the result.
3601         continue;
3602       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3603         auto Elements =
3604             cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
3605         auto *O0 = B.CreateZExtOrTrunc(
3606             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3607         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3608         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3609       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3610         auto Elements =
3611             cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
3612         auto *O0 = B.CreateZExtOrTrunc(
3613             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3614         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3615       } else {
3616         // If we don't know what to do, be conservative and don't do anything.
3617         continue;
3618       }
3619 
3620       // Lastly, extend the result.
3621       NewI->takeName(cast<Instruction>(I));
3622       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3623       I->replaceAllUsesWith(Res);
3624       cast<Instruction>(I)->eraseFromParent();
3625       Erased.insert(I);
3626       State.reset(Def, Res, Part);
3627     }
3628   }
3629 
3630   // We'll have created a bunch of ZExts that are now parentless. Clean up.
3631   for (const auto &KV : Cost->getMinimalBitwidths()) {
3632     // If the value wasn't vectorized, we must maintain the original scalar
3633     // type. The absence of the value from State indicates that it
3634     // wasn't vectorized.
3635     // FIXME: Should not rely on getVPValue at this point.
3636     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3637     if (!State.hasAnyVectorValue(Def))
3638       continue;
3639     for (unsigned Part = 0; Part < UF; ++Part) {
3640       Value *I = State.get(Def, Part);
3641       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3642       if (Inst && Inst->use_empty()) {
3643         Value *NewI = Inst->getOperand(0);
3644         Inst->eraseFromParent();
3645         State.reset(Def, NewI, Part);
3646       }
3647     }
3648   }
3649 }
3650 
3651 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
3652   // Insert truncates and extends for any truncated instructions as hints to
3653   // InstCombine.
3654   if (VF.isVector())
3655     truncateToMinimalBitwidths(State);
3656 
3657   // Fix widened non-induction PHIs by setting up the PHI operands.
3658   if (OrigPHIsToFix.size()) {
3659     assert(EnableVPlanNativePath &&
3660            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3661     fixNonInductionPHIs(State);
3662   }
3663 
3664   // At this point every instruction in the original loop is widened to a
3665   // vector form. Now we need to fix the recurrences in the loop. These PHI
3666   // nodes are currently empty because we did not want to introduce cycles.
3667   // This is the second stage of vectorizing recurrences.
3668   fixCrossIterationPHIs(State);
3669 
3670   // Forget the original basic block.
3671   PSE.getSE()->forgetLoop(OrigLoop);
3672 
3673   Loop *VectorLoop = LI->getLoopFor(State.CFG.PrevBB);
3674   // If we inserted an edge from the middle block to the unique exit block,
3675   // update uses outside the loop (phis) to account for the newly inserted
3676   // edge.
3677   if (!Cost->requiresScalarEpilogue(VF)) {
3678     // Fix-up external users of the induction variables.
3679     for (auto &Entry : Legal->getInductionVars())
3680       fixupIVUsers(Entry.first, Entry.second,
3681                    getOrCreateVectorTripCount(VectorLoop->getLoopPreheader()),
3682                    IVEndValues[Entry.first], LoopMiddleBlock,
3683                    VectorLoop->getHeader());
3684 
3685     fixLCSSAPHIs(State);
3686   }
3687 
3688   for (Instruction *PI : PredicatedInstructions)
3689     sinkScalarOperands(&*PI);
3690 
3691   // Remove redundant induction instructions.
3692   cse(VectorLoop->getHeader());
3693 
3694   // Set/update profile weights for the vector and remainder loops as original
3695   // loop iterations are now distributed among them. Note that original loop
3696   // represented by LoopScalarBody becomes remainder loop after vectorization.
3697   //
3698   // For cases like foldTailByMasking() and requiresScalarEpiloque() we may
3699   // end up getting slightly roughened result but that should be OK since
3700   // profile is not inherently precise anyway. Note also possible bypass of
3701   // vector code caused by legality checks is ignored, assigning all the weight
3702   // to the vector loop, optimistically.
3703   //
3704   // For scalable vectorization we can't know at compile time how many iterations
3705   // of the loop are handled in one vector iteration, so instead assume a pessimistic
3706   // vscale of '1'.
3707   setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody), VectorLoop,
3708                                LI->getLoopFor(LoopScalarBody),
3709                                VF.getKnownMinValue() * UF);
3710 }
3711 
3712 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
3713   // In order to support recurrences we need to be able to vectorize Phi nodes.
3714   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3715   // stage #2: We now need to fix the recurrences by adding incoming edges to
3716   // the currently empty PHI nodes. At this point every instruction in the
3717   // original loop is widened to a vector form so we can use them to construct
3718   // the incoming edges.
3719   VPBasicBlock *Header =
3720       State.Plan->getVectorLoopRegion()->getEntryBasicBlock();
3721   for (VPRecipeBase &R : Header->phis()) {
3722     if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
3723       fixReduction(ReductionPhi, State);
3724     else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
3725       fixFirstOrderRecurrence(FOR, State);
3726   }
3727 }
3728 
3729 void InnerLoopVectorizer::fixFirstOrderRecurrence(
3730     VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
3731   // This is the second phase of vectorizing first-order recurrences. An
3732   // overview of the transformation is described below. Suppose we have the
3733   // following loop.
3734   //
3735   //   for (int i = 0; i < n; ++i)
3736   //     b[i] = a[i] - a[i - 1];
3737   //
3738   // There is a first-order recurrence on "a". For this loop, the shorthand
3739   // scalar IR looks like:
3740   //
3741   //   scalar.ph:
3742   //     s_init = a[-1]
3743   //     br scalar.body
3744   //
3745   //   scalar.body:
3746   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3747   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3748   //     s2 = a[i]
3749   //     b[i] = s2 - s1
3750   //     br cond, scalar.body, ...
3751   //
3752   // In this example, s1 is a recurrence because it's value depends on the
3753   // previous iteration. In the first phase of vectorization, we created a
3754   // vector phi v1 for s1. We now complete the vectorization and produce the
3755   // shorthand vector IR shown below (for VF = 4, UF = 1).
3756   //
3757   //   vector.ph:
3758   //     v_init = vector(..., ..., ..., a[-1])
3759   //     br vector.body
3760   //
3761   //   vector.body
3762   //     i = phi [0, vector.ph], [i+4, vector.body]
3763   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3764   //     v2 = a[i, i+1, i+2, i+3];
3765   //     v3 = vector(v1(3), v2(0, 1, 2))
3766   //     b[i, i+1, i+2, i+3] = v2 - v3
3767   //     br cond, vector.body, middle.block
3768   //
3769   //   middle.block:
3770   //     x = v2(3)
3771   //     br scalar.ph
3772   //
3773   //   scalar.ph:
3774   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3775   //     br scalar.body
3776   //
3777   // After execution completes the vector loop, we extract the next value of
3778   // the recurrence (x) to use as the initial value in the scalar loop.
3779 
3780   // Extract the last vector element in the middle block. This will be the
3781   // initial value for the recurrence when jumping to the scalar loop.
3782   VPValue *PreviousDef = PhiR->getBackedgeValue();
3783   Value *Incoming = State.get(PreviousDef, UF - 1);
3784   auto *ExtractForScalar = Incoming;
3785   auto *IdxTy = Builder.getInt32Ty();
3786   if (VF.isVector()) {
3787     auto *One = ConstantInt::get(IdxTy, 1);
3788     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3789     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
3790     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
3791     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
3792                                                     "vector.recur.extract");
3793   }
3794   // Extract the second last element in the middle block if the
3795   // Phi is used outside the loop. We need to extract the phi itself
3796   // and not the last element (the phi update in the current iteration). This
3797   // will be the value when jumping to the exit block from the LoopMiddleBlock,
3798   // when the scalar loop is not run at all.
3799   Value *ExtractForPhiUsedOutsideLoop = nullptr;
3800   if (VF.isVector()) {
3801     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
3802     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
3803     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3804         Incoming, Idx, "vector.recur.extract.for.phi");
3805   } else if (UF > 1)
3806     // When loop is unrolled without vectorizing, initialize
3807     // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value
3808     // of `Incoming`. This is analogous to the vectorized case above: extracting
3809     // the second last element when VF > 1.
3810     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
3811 
3812   // Fix the initial value of the original recurrence in the scalar loop.
3813   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3814   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
3815   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3816   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
3817   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3818     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3819     Start->addIncoming(Incoming, BB);
3820   }
3821 
3822   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3823   Phi->setName("scalar.recur");
3824 
3825   // Finally, fix users of the recurrence outside the loop. The users will need
3826   // either the last value of the scalar recurrence or the last value of the
3827   // vector recurrence we extracted in the middle block. Since the loop is in
3828   // LCSSA form, we just need to find all the phi nodes for the original scalar
3829   // recurrence in the exit block, and then add an edge for the middle block.
3830   // Note that LCSSA does not imply single entry when the original scalar loop
3831   // had multiple exiting edges (as we always run the last iteration in the
3832   // scalar epilogue); in that case, there is no edge from middle to exit and
3833   // and thus no phis which needed updated.
3834   if (!Cost->requiresScalarEpilogue(VF))
3835     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
3836       if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
3837         LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3838 }
3839 
3840 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
3841                                        VPTransformState &State) {
3842   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
3843   // Get it's reduction variable descriptor.
3844   assert(Legal->isReductionVariable(OrigPhi) &&
3845          "Unable to find the reduction variable");
3846   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
3847 
3848   RecurKind RK = RdxDesc.getRecurrenceKind();
3849   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3850   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3851   setDebugLocFromInst(ReductionStartValue);
3852 
3853   VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
3854   // This is the vector-clone of the value that leaves the loop.
3855   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
3856 
3857   // Wrap flags are in general invalid after vectorization, clear them.
3858   clearReductionWrapFlags(RdxDesc, State);
3859 
3860   // Before each round, move the insertion point right between
3861   // the PHIs and the values we are going to write.
3862   // This allows us to write both PHINodes and the extractelement
3863   // instructions.
3864   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3865 
3866   setDebugLocFromInst(LoopExitInst);
3867 
3868   Type *PhiTy = OrigPhi->getType();
3869   BasicBlock *VectorLoopLatch =
3870       LI->getLoopFor(State.CFG.PrevBB)->getLoopLatch();
3871   // If tail is folded by masking, the vector value to leave the loop should be
3872   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
3873   // instead of the former. For an inloop reduction the reduction will already
3874   // be predicated, and does not need to be handled here.
3875   if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
3876     for (unsigned Part = 0; Part < UF; ++Part) {
3877       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
3878       Value *Sel = nullptr;
3879       for (User *U : VecLoopExitInst->users()) {
3880         if (isa<SelectInst>(U)) {
3881           assert(!Sel && "Reduction exit feeding two selects");
3882           Sel = U;
3883         } else
3884           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
3885       }
3886       assert(Sel && "Reduction exit feeds no select");
3887       State.reset(LoopExitInstDef, Sel, Part);
3888 
3889       // If the target can create a predicated operator for the reduction at no
3890       // extra cost in the loop (for example a predicated vadd), it can be
3891       // cheaper for the select to remain in the loop than be sunk out of it,
3892       // and so use the select value for the phi instead of the old
3893       // LoopExitValue.
3894       if (PreferPredicatedReductionSelect ||
3895           TTI->preferPredicatedReductionSelect(
3896               RdxDesc.getOpcode(), PhiTy,
3897               TargetTransformInfo::ReductionFlags())) {
3898         auto *VecRdxPhi =
3899             cast<PHINode>(State.get(PhiR, Part));
3900         VecRdxPhi->setIncomingValueForBlock(VectorLoopLatch, Sel);
3901       }
3902     }
3903   }
3904 
3905   // If the vector reduction can be performed in a smaller type, we truncate
3906   // then extend the loop exit value to enable InstCombine to evaluate the
3907   // entire expression in the smaller type.
3908   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
3909     assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
3910     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3911     Builder.SetInsertPoint(VectorLoopLatch->getTerminator());
3912     VectorParts RdxParts(UF);
3913     for (unsigned Part = 0; Part < UF; ++Part) {
3914       RdxParts[Part] = State.get(LoopExitInstDef, Part);
3915       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3916       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3917                                         : Builder.CreateZExt(Trunc, VecTy);
3918       for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
3919         if (U != Trunc) {
3920           U->replaceUsesOfWith(RdxParts[Part], Extnd);
3921           RdxParts[Part] = Extnd;
3922         }
3923     }
3924     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3925     for (unsigned Part = 0; Part < UF; ++Part) {
3926       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3927       State.reset(LoopExitInstDef, RdxParts[Part], Part);
3928     }
3929   }
3930 
3931   // Reduce all of the unrolled parts into a single vector.
3932   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
3933   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
3934 
3935   // The middle block terminator has already been assigned a DebugLoc here (the
3936   // OrigLoop's single latch terminator). We want the whole middle block to
3937   // appear to execute on this line because: (a) it is all compiler generated,
3938   // (b) these instructions are always executed after evaluating the latch
3939   // conditional branch, and (c) other passes may add new predecessors which
3940   // terminate on this line. This is the easiest way to ensure we don't
3941   // accidentally cause an extra step back into the loop while debugging.
3942   setDebugLocFromInst(LoopMiddleBlock->getTerminator());
3943   if (PhiR->isOrdered())
3944     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
3945   else {
3946     // Floating-point operations should have some FMF to enable the reduction.
3947     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
3948     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
3949     for (unsigned Part = 1; Part < UF; ++Part) {
3950       Value *RdxPart = State.get(LoopExitInstDef, Part);
3951       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
3952         ReducedPartRdx = Builder.CreateBinOp(
3953             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
3954       } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
3955         ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
3956                                            ReducedPartRdx, RdxPart);
3957       else
3958         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
3959     }
3960   }
3961 
3962   // Create the reduction after the loop. Note that inloop reductions create the
3963   // target reduction in the loop using a Reduction recipe.
3964   if (VF.isVector() && !PhiR->isInLoop()) {
3965     ReducedPartRdx =
3966         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
3967     // If the reduction can be performed in a smaller type, we need to extend
3968     // the reduction to the wider type before we branch to the original loop.
3969     if (PhiTy != RdxDesc.getRecurrenceType())
3970       ReducedPartRdx = RdxDesc.isSigned()
3971                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
3972                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
3973   }
3974 
3975   PHINode *ResumePhi =
3976       dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());
3977 
3978   // Create a phi node that merges control-flow from the backedge-taken check
3979   // block and the middle block.
3980   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
3981                                         LoopScalarPreHeader->getTerminator());
3982 
3983   // If we are fixing reductions in the epilogue loop then we should already
3984   // have created a bc.merge.rdx Phi after the main vector body. Ensure that
3985   // we carry over the incoming values correctly.
3986   for (auto *Incoming : predecessors(LoopScalarPreHeader)) {
3987     if (Incoming == LoopMiddleBlock)
3988       BCBlockPhi->addIncoming(ReducedPartRdx, Incoming);
3989     else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming))
3990       BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
3991                               Incoming);
3992     else
3993       BCBlockPhi->addIncoming(ReductionStartValue, Incoming);
3994   }
3995 
3996   // Set the resume value for this reduction
3997   ReductionResumeValues.insert({&RdxDesc, BCBlockPhi});
3998 
3999   // Now, we need to fix the users of the reduction variable
4000   // inside and outside of the scalar remainder loop.
4001 
4002   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4003   // in the exit blocks.  See comment on analogous loop in
4004   // fixFirstOrderRecurrence for a more complete explaination of the logic.
4005   if (!Cost->requiresScalarEpilogue(VF))
4006     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4007       if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
4008         LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4009 
4010   // Fix the scalar loop reduction variable with the incoming reduction sum
4011   // from the vector body and from the backedge value.
4012   int IncomingEdgeBlockIdx =
4013       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4014   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4015   // Pick the other block.
4016   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4017   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4018   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4019 }
4020 
4021 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
4022                                                   VPTransformState &State) {
4023   RecurKind RK = RdxDesc.getRecurrenceKind();
4024   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4025     return;
4026 
4027   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4028   assert(LoopExitInstr && "null loop exit instruction");
4029   SmallVector<Instruction *, 8> Worklist;
4030   SmallPtrSet<Instruction *, 8> Visited;
4031   Worklist.push_back(LoopExitInstr);
4032   Visited.insert(LoopExitInstr);
4033 
4034   while (!Worklist.empty()) {
4035     Instruction *Cur = Worklist.pop_back_val();
4036     if (isa<OverflowingBinaryOperator>(Cur))
4037       for (unsigned Part = 0; Part < UF; ++Part) {
4038         // FIXME: Should not rely on getVPValue at this point.
4039         Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
4040         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4041       }
4042 
4043     for (User *U : Cur->users()) {
4044       Instruction *UI = cast<Instruction>(U);
4045       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4046           Visited.insert(UI).second)
4047         Worklist.push_back(UI);
4048     }
4049   }
4050 }
4051 
4052 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4053   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4054     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4055       // Some phis were already hand updated by the reduction and recurrence
4056       // code above, leave them alone.
4057       continue;
4058 
4059     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4060     // Non-instruction incoming values will have only one value.
4061 
4062     VPLane Lane = VPLane::getFirstLane();
4063     if (isa<Instruction>(IncomingValue) &&
4064         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4065                                            VF))
4066       Lane = VPLane::getLastLaneForVF(VF);
4067 
4068     // Can be a loop invariant incoming value or the last scalar value to be
4069     // extracted from the vectorized loop.
4070     // FIXME: Should not rely on getVPValue at this point.
4071     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4072     Value *lastIncomingValue =
4073         OrigLoop->isLoopInvariant(IncomingValue)
4074             ? IncomingValue
4075             : State.get(State.Plan->getVPValue(IncomingValue, true),
4076                         VPIteration(UF - 1, Lane));
4077     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4078   }
4079 }
4080 
4081 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4082   // The basic block and loop containing the predicated instruction.
4083   auto *PredBB = PredInst->getParent();
4084   auto *VectorLoop = LI->getLoopFor(PredBB);
4085 
4086   // Initialize a worklist with the operands of the predicated instruction.
4087   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4088 
4089   // Holds instructions that we need to analyze again. An instruction may be
4090   // reanalyzed if we don't yet know if we can sink it or not.
4091   SmallVector<Instruction *, 8> InstsToReanalyze;
4092 
4093   // Returns true if a given use occurs in the predicated block. Phi nodes use
4094   // their operands in their corresponding predecessor blocks.
4095   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4096     auto *I = cast<Instruction>(U.getUser());
4097     BasicBlock *BB = I->getParent();
4098     if (auto *Phi = dyn_cast<PHINode>(I))
4099       BB = Phi->getIncomingBlock(
4100           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4101     return BB == PredBB;
4102   };
4103 
4104   // Iteratively sink the scalarized operands of the predicated instruction
4105   // into the block we created for it. When an instruction is sunk, it's
4106   // operands are then added to the worklist. The algorithm ends after one pass
4107   // through the worklist doesn't sink a single instruction.
4108   bool Changed;
4109   do {
4110     // Add the instructions that need to be reanalyzed to the worklist, and
4111     // reset the changed indicator.
4112     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4113     InstsToReanalyze.clear();
4114     Changed = false;
4115 
4116     while (!Worklist.empty()) {
4117       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4118 
4119       // We can't sink an instruction if it is a phi node, is not in the loop,
4120       // or may have side effects.
4121       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4122           I->mayHaveSideEffects())
4123         continue;
4124 
4125       // If the instruction is already in PredBB, check if we can sink its
4126       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4127       // sinking the scalar instruction I, hence it appears in PredBB; but it
4128       // may have failed to sink I's operands (recursively), which we try
4129       // (again) here.
4130       if (I->getParent() == PredBB) {
4131         Worklist.insert(I->op_begin(), I->op_end());
4132         continue;
4133       }
4134 
4135       // It's legal to sink the instruction if all its uses occur in the
4136       // predicated block. Otherwise, there's nothing to do yet, and we may
4137       // need to reanalyze the instruction.
4138       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4139         InstsToReanalyze.push_back(I);
4140         continue;
4141       }
4142 
4143       // Move the instruction to the beginning of the predicated block, and add
4144       // it's operands to the worklist.
4145       I->moveBefore(&*PredBB->getFirstInsertionPt());
4146       Worklist.insert(I->op_begin(), I->op_end());
4147 
4148       // The sinking may have enabled other instructions to be sunk, so we will
4149       // need to iterate.
4150       Changed = true;
4151     }
4152   } while (Changed);
4153 }
4154 
4155 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4156   for (PHINode *OrigPhi : OrigPHIsToFix) {
4157     VPWidenPHIRecipe *VPPhi =
4158         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4159     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4160     // Make sure the builder has a valid insert point.
4161     Builder.SetInsertPoint(NewPhi);
4162     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4163       VPValue *Inc = VPPhi->getIncomingValue(i);
4164       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4165       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4166     }
4167   }
4168 }
4169 
4170 bool InnerLoopVectorizer::useOrderedReductions(
4171     const RecurrenceDescriptor &RdxDesc) {
4172   return Cost->useOrderedReductions(RdxDesc);
4173 }
4174 
4175 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4176                                               VPWidenPHIRecipe *PhiR,
4177                                               VPTransformState &State) {
4178   assert(EnableVPlanNativePath &&
4179          "Non-native vplans are not expected to have VPWidenPHIRecipes.");
4180   // Currently we enter here in the VPlan-native path for non-induction
4181   // PHIs where all control flow is uniform. We simply widen these PHIs.
4182   // Create a vector phi with no operands - the vector phi operands will be
4183   // set at the end of vector code generation.
4184   Type *VecTy = (State.VF.isScalar())
4185                     ? PN->getType()
4186                     : VectorType::get(PN->getType(), State.VF);
4187   Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4188   State.set(PhiR, VecPhi, 0);
4189   OrigPHIsToFix.push_back(cast<PHINode>(PN));
4190 }
4191 
4192 /// A helper function for checking whether an integer division-related
4193 /// instruction may divide by zero (in which case it must be predicated if
4194 /// executed conditionally in the scalar code).
4195 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4196 /// Non-zero divisors that are non compile-time constants will not be
4197 /// converted into multiplication, so we will still end up scalarizing
4198 /// the division, but can do so w/o predication.
4199 static bool mayDivideByZero(Instruction &I) {
4200   assert((I.getOpcode() == Instruction::UDiv ||
4201           I.getOpcode() == Instruction::SDiv ||
4202           I.getOpcode() == Instruction::URem ||
4203           I.getOpcode() == Instruction::SRem) &&
4204          "Unexpected instruction");
4205   Value *Divisor = I.getOperand(1);
4206   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4207   return !CInt || CInt->isZero();
4208 }
4209 
4210 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4211                                                VPUser &ArgOperands,
4212                                                VPTransformState &State) {
4213   assert(!isa<DbgInfoIntrinsic>(I) &&
4214          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4215   setDebugLocFromInst(&I);
4216 
4217   Module *M = I.getParent()->getParent()->getParent();
4218   auto *CI = cast<CallInst>(&I);
4219 
4220   SmallVector<Type *, 4> Tys;
4221   for (Value *ArgOperand : CI->args())
4222     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4223 
4224   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4225 
4226   // The flag shows whether we use Intrinsic or a usual Call for vectorized
4227   // version of the instruction.
4228   // Is it beneficial to perform intrinsic call compared to lib call?
4229   bool NeedToScalarize = false;
4230   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4231   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4232   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4233   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4234          "Instruction should be scalarized elsewhere.");
4235   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
4236          "Either the intrinsic cost or vector call cost must be valid");
4237 
4238   for (unsigned Part = 0; Part < UF; ++Part) {
4239     SmallVector<Type *, 2> TysForDecl = {CI->getType()};
4240     SmallVector<Value *, 4> Args;
4241     for (auto &I : enumerate(ArgOperands.operands())) {
4242       // Some intrinsics have a scalar argument - don't replace it with a
4243       // vector.
4244       Value *Arg;
4245       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4246         Arg = State.get(I.value(), Part);
4247       else {
4248         Arg = State.get(I.value(), VPIteration(0, 0));
4249         if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
4250           TysForDecl.push_back(Arg->getType());
4251       }
4252       Args.push_back(Arg);
4253     }
4254 
4255     Function *VectorF;
4256     if (UseVectorIntrinsic) {
4257       // Use vector version of the intrinsic.
4258       if (VF.isVector())
4259         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4260       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4261       assert(VectorF && "Can't retrieve vector intrinsic.");
4262     } else {
4263       // Use vector version of the function call.
4264       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4265 #ifndef NDEBUG
4266       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4267              "Can't create vector function.");
4268 #endif
4269         VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
4270     }
4271       SmallVector<OperandBundleDef, 1> OpBundles;
4272       CI->getOperandBundlesAsDefs(OpBundles);
4273       CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4274 
4275       if (isa<FPMathOperator>(V))
4276         V->copyFastMathFlags(CI);
4277 
4278       State.set(Def, V, Part);
4279       addMetadata(V, &I);
4280   }
4281 }
4282 
4283 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
4284   // We should not collect Scalars more than once per VF. Right now, this
4285   // function is called from collectUniformsAndScalars(), which already does
4286   // this check. Collecting Scalars for VF=1 does not make any sense.
4287   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
4288          "This function should not be visited twice for the same VF");
4289 
4290   // This avoids any chances of creating a REPLICATE recipe during planning
4291   // since that would result in generation of scalarized code during execution,
4292   // which is not supported for scalable vectors.
4293   if (VF.isScalable()) {
4294     Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end());
4295     return;
4296   }
4297 
4298   SmallSetVector<Instruction *, 8> Worklist;
4299 
4300   // These sets are used to seed the analysis with pointers used by memory
4301   // accesses that will remain scalar.
4302   SmallSetVector<Instruction *, 8> ScalarPtrs;
4303   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4304   auto *Latch = TheLoop->getLoopLatch();
4305 
4306   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4307   // The pointer operands of loads and stores will be scalar as long as the
4308   // memory access is not a gather or scatter operation. The value operand of a
4309   // store will remain scalar if the store is scalarized.
4310   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4311     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4312     assert(WideningDecision != CM_Unknown &&
4313            "Widening decision should be ready at this moment");
4314     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4315       if (Ptr == Store->getValueOperand())
4316         return WideningDecision == CM_Scalarize;
4317     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4318            "Ptr is neither a value or pointer operand");
4319     return WideningDecision != CM_GatherScatter;
4320   };
4321 
4322   // A helper that returns true if the given value is a bitcast or
4323   // getelementptr instruction contained in the loop.
4324   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4325     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4326             isa<GetElementPtrInst>(V)) &&
4327            !TheLoop->isLoopInvariant(V);
4328   };
4329 
4330   // A helper that evaluates a memory access's use of a pointer. If the use will
4331   // be a scalar use and the pointer is only used by memory accesses, we place
4332   // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4333   // PossibleNonScalarPtrs.
4334   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4335     // We only care about bitcast and getelementptr instructions contained in
4336     // the loop.
4337     if (!isLoopVaryingBitCastOrGEP(Ptr))
4338       return;
4339 
4340     // If the pointer has already been identified as scalar (e.g., if it was
4341     // also identified as uniform), there's nothing to do.
4342     auto *I = cast<Instruction>(Ptr);
4343     if (Worklist.count(I))
4344       return;
4345 
4346     // If the use of the pointer will be a scalar use, and all users of the
4347     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4348     // place the pointer in PossibleNonScalarPtrs.
4349     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4350           return isa<LoadInst>(U) || isa<StoreInst>(U);
4351         }))
4352       ScalarPtrs.insert(I);
4353     else
4354       PossibleNonScalarPtrs.insert(I);
4355   };
4356 
4357   // We seed the scalars analysis with three classes of instructions: (1)
4358   // instructions marked uniform-after-vectorization and (2) bitcast,
4359   // getelementptr and (pointer) phi instructions used by memory accesses
4360   // requiring a scalar use.
4361   //
4362   // (1) Add to the worklist all instructions that have been identified as
4363   // uniform-after-vectorization.
4364   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4365 
4366   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4367   // memory accesses requiring a scalar use. The pointer operands of loads and
4368   // stores will be scalar as long as the memory accesses is not a gather or
4369   // scatter operation. The value operand of a store will remain scalar if the
4370   // store is scalarized.
4371   for (auto *BB : TheLoop->blocks())
4372     for (auto &I : *BB) {
4373       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4374         evaluatePtrUse(Load, Load->getPointerOperand());
4375       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4376         evaluatePtrUse(Store, Store->getPointerOperand());
4377         evaluatePtrUse(Store, Store->getValueOperand());
4378       }
4379     }
4380   for (auto *I : ScalarPtrs)
4381     if (!PossibleNonScalarPtrs.count(I)) {
4382       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4383       Worklist.insert(I);
4384     }
4385 
4386   // Insert the forced scalars.
4387   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4388   // induction variable when the PHI user is scalarized.
4389   auto ForcedScalar = ForcedScalars.find(VF);
4390   if (ForcedScalar != ForcedScalars.end())
4391     for (auto *I : ForcedScalar->second)
4392       Worklist.insert(I);
4393 
4394   // Expand the worklist by looking through any bitcasts and getelementptr
4395   // instructions we've already identified as scalar. This is similar to the
4396   // expansion step in collectLoopUniforms(); however, here we're only
4397   // expanding to include additional bitcasts and getelementptr instructions.
4398   unsigned Idx = 0;
4399   while (Idx != Worklist.size()) {
4400     Instruction *Dst = Worklist[Idx++];
4401     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4402       continue;
4403     auto *Src = cast<Instruction>(Dst->getOperand(0));
4404     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4405           auto *J = cast<Instruction>(U);
4406           return !TheLoop->contains(J) || Worklist.count(J) ||
4407                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4408                   isScalarUse(J, Src));
4409         })) {
4410       Worklist.insert(Src);
4411       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4412     }
4413   }
4414 
4415   // An induction variable will remain scalar if all users of the induction
4416   // variable and induction variable update remain scalar.
4417   for (auto &Induction : Legal->getInductionVars()) {
4418     auto *Ind = Induction.first;
4419     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4420 
4421     // If tail-folding is applied, the primary induction variable will be used
4422     // to feed a vector compare.
4423     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4424       continue;
4425 
4426     // Returns true if \p Indvar is a pointer induction that is used directly by
4427     // load/store instruction \p I.
4428     auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
4429                                               Instruction *I) {
4430       return Induction.second.getKind() ==
4431                  InductionDescriptor::IK_PtrInduction &&
4432              (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
4433              Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
4434     };
4435 
4436     // Determine if all users of the induction variable are scalar after
4437     // vectorization.
4438     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4439       auto *I = cast<Instruction>(U);
4440       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4441              IsDirectLoadStoreFromPtrIndvar(Ind, I);
4442     });
4443     if (!ScalarInd)
4444       continue;
4445 
4446     // Determine if all users of the induction variable update instruction are
4447     // scalar after vectorization.
4448     auto ScalarIndUpdate =
4449         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4450           auto *I = cast<Instruction>(U);
4451           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4452                  IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
4453         });
4454     if (!ScalarIndUpdate)
4455       continue;
4456 
4457     // The induction variable and its update instruction will remain scalar.
4458     Worklist.insert(Ind);
4459     Worklist.insert(IndUpdate);
4460     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4461     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4462                       << "\n");
4463   }
4464 
4465   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4466 }
4467 
4468 bool LoopVectorizationCostModel::isScalarWithPredication(
4469     Instruction *I, ElementCount VF) const {
4470   if (!blockNeedsPredicationForAnyReason(I->getParent()))
4471     return false;
4472   switch(I->getOpcode()) {
4473   default:
4474     break;
4475   case Instruction::Load:
4476   case Instruction::Store: {
4477     if (!Legal->isMaskRequired(I))
4478       return false;
4479     auto *Ptr = getLoadStorePointerOperand(I);
4480     auto *Ty = getLoadStoreType(I);
4481     Type *VTy = Ty;
4482     if (VF.isVector())
4483       VTy = VectorType::get(Ty, VF);
4484     const Align Alignment = getLoadStoreAlignment(I);
4485     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4486                                 TTI.isLegalMaskedGather(VTy, Alignment))
4487                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4488                                 TTI.isLegalMaskedScatter(VTy, Alignment));
4489   }
4490   case Instruction::UDiv:
4491   case Instruction::SDiv:
4492   case Instruction::SRem:
4493   case Instruction::URem:
4494     return mayDivideByZero(*I);
4495   }
4496   return false;
4497 }
4498 
4499 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
4500     Instruction *I, ElementCount VF) {
4501   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4502   assert(getWideningDecision(I, VF) == CM_Unknown &&
4503          "Decision should not be set yet.");
4504   auto *Group = getInterleavedAccessGroup(I);
4505   assert(Group && "Must have a group.");
4506 
4507   // If the instruction's allocated size doesn't equal it's type size, it
4508   // requires padding and will be scalarized.
4509   auto &DL = I->getModule()->getDataLayout();
4510   auto *ScalarTy = getLoadStoreType(I);
4511   if (hasIrregularType(ScalarTy, DL))
4512     return false;
4513 
4514   // If the group involves a non-integral pointer, we may not be able to
4515   // losslessly cast all values to a common type.
4516   unsigned InterleaveFactor = Group->getFactor();
4517   bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy);
4518   for (unsigned i = 0; i < InterleaveFactor; i++) {
4519     Instruction *Member = Group->getMember(i);
4520     if (!Member)
4521       continue;
4522     auto *MemberTy = getLoadStoreType(Member);
4523     bool MemberNI = DL.isNonIntegralPointerType(MemberTy);
4524     // Don't coerce non-integral pointers to integers or vice versa.
4525     if (MemberNI != ScalarNI) {
4526       // TODO: Consider adding special nullptr value case here
4527       return false;
4528     } else if (MemberNI && ScalarNI &&
4529                ScalarTy->getPointerAddressSpace() !=
4530                MemberTy->getPointerAddressSpace()) {
4531       return false;
4532     }
4533   }
4534 
4535   // Check if masking is required.
4536   // A Group may need masking for one of two reasons: it resides in a block that
4537   // needs predication, or it was decided to use masking to deal with gaps
4538   // (either a gap at the end of a load-access that may result in a speculative
4539   // load, or any gaps in a store-access).
4540   bool PredicatedAccessRequiresMasking =
4541       blockNeedsPredicationForAnyReason(I->getParent()) &&
4542       Legal->isMaskRequired(I);
4543   bool LoadAccessWithGapsRequiresEpilogMasking =
4544       isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
4545       !isScalarEpilogueAllowed();
4546   bool StoreAccessWithGapsRequiresMasking =
4547       isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
4548   if (!PredicatedAccessRequiresMasking &&
4549       !LoadAccessWithGapsRequiresEpilogMasking &&
4550       !StoreAccessWithGapsRequiresMasking)
4551     return true;
4552 
4553   // If masked interleaving is required, we expect that the user/target had
4554   // enabled it, because otherwise it either wouldn't have been created or
4555   // it should have been invalidated by the CostModel.
4556   assert(useMaskedInterleavedAccesses(TTI) &&
4557          "Masked interleave-groups for predicated accesses are not enabled.");
4558 
4559   if (Group->isReverse())
4560     return false;
4561 
4562   auto *Ty = getLoadStoreType(I);
4563   const Align Alignment = getLoadStoreAlignment(I);
4564   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4565                           : TTI.isLegalMaskedStore(Ty, Alignment);
4566 }
4567 
4568 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
4569     Instruction *I, ElementCount VF) {
4570   // Get and ensure we have a valid memory instruction.
4571   assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
4572 
4573   auto *Ptr = getLoadStorePointerOperand(I);
4574   auto *ScalarTy = getLoadStoreType(I);
4575 
4576   // In order to be widened, the pointer should be consecutive, first of all.
4577   if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
4578     return false;
4579 
4580   // If the instruction is a store located in a predicated block, it will be
4581   // scalarized.
4582   if (isScalarWithPredication(I, VF))
4583     return false;
4584 
4585   // If the instruction's allocated size doesn't equal it's type size, it
4586   // requires padding and will be scalarized.
4587   auto &DL = I->getModule()->getDataLayout();
4588   if (hasIrregularType(ScalarTy, DL))
4589     return false;
4590 
4591   return true;
4592 }
4593 
4594 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
4595   // We should not collect Uniforms more than once per VF. Right now,
4596   // this function is called from collectUniformsAndScalars(), which
4597   // already does this check. Collecting Uniforms for VF=1 does not make any
4598   // sense.
4599 
4600   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
4601          "This function should not be visited twice for the same VF");
4602 
4603   // Visit the list of Uniforms. If we'll not find any uniform value, we'll
4604   // not analyze again.  Uniforms.count(VF) will return 1.
4605   Uniforms[VF].clear();
4606 
4607   // We now know that the loop is vectorizable!
4608   // Collect instructions inside the loop that will remain uniform after
4609   // vectorization.
4610 
4611   // Global values, params and instructions outside of current loop are out of
4612   // scope.
4613   auto isOutOfScope = [&](Value *V) -> bool {
4614     Instruction *I = dyn_cast<Instruction>(V);
4615     return (!I || !TheLoop->contains(I));
4616   };
4617 
4618   // Worklist containing uniform instructions demanding lane 0.
4619   SetVector<Instruction *> Worklist;
4620   BasicBlock *Latch = TheLoop->getLoopLatch();
4621 
4622   // Add uniform instructions demanding lane 0 to the worklist. Instructions
4623   // that are scalar with predication must not be considered uniform after
4624   // vectorization, because that would create an erroneous replicating region
4625   // where only a single instance out of VF should be formed.
4626   // TODO: optimize such seldom cases if found important, see PR40816.
4627   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
4628     if (isOutOfScope(I)) {
4629       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
4630                         << *I << "\n");
4631       return;
4632     }
4633     if (isScalarWithPredication(I, VF)) {
4634       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
4635                         << *I << "\n");
4636       return;
4637     }
4638     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
4639     Worklist.insert(I);
4640   };
4641 
4642   // Start with the conditional branch. If the branch condition is an
4643   // instruction contained in the loop that is only used by the branch, it is
4644   // uniform.
4645   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4646   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
4647     addToWorklistIfAllowed(Cmp);
4648 
4649   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
4650     InstWidening WideningDecision = getWideningDecision(I, VF);
4651     assert(WideningDecision != CM_Unknown &&
4652            "Widening decision should be ready at this moment");
4653 
4654     // A uniform memory op is itself uniform.  We exclude uniform stores
4655     // here as they demand the last lane, not the first one.
4656     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
4657       assert(WideningDecision == CM_Scalarize);
4658       return true;
4659     }
4660 
4661     return (WideningDecision == CM_Widen ||
4662             WideningDecision == CM_Widen_Reverse ||
4663             WideningDecision == CM_Interleave);
4664   };
4665 
4666 
4667   // Returns true if Ptr is the pointer operand of a memory access instruction
4668   // I, and I is known to not require scalarization.
4669   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
4670     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
4671   };
4672 
4673   // Holds a list of values which are known to have at least one uniform use.
4674   // Note that there may be other uses which aren't uniform.  A "uniform use"
4675   // here is something which only demands lane 0 of the unrolled iterations;
4676   // it does not imply that all lanes produce the same value (e.g. this is not
4677   // the usual meaning of uniform)
4678   SetVector<Value *> HasUniformUse;
4679 
4680   // Scan the loop for instructions which are either a) known to have only
4681   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
4682   for (auto *BB : TheLoop->blocks())
4683     for (auto &I : *BB) {
4684       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
4685         switch (II->getIntrinsicID()) {
4686         case Intrinsic::sideeffect:
4687         case Intrinsic::experimental_noalias_scope_decl:
4688         case Intrinsic::assume:
4689         case Intrinsic::lifetime_start:
4690         case Intrinsic::lifetime_end:
4691           if (TheLoop->hasLoopInvariantOperands(&I))
4692             addToWorklistIfAllowed(&I);
4693           break;
4694         default:
4695           break;
4696         }
4697       }
4698 
4699       // ExtractValue instructions must be uniform, because the operands are
4700       // known to be loop-invariant.
4701       if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
4702         assert(isOutOfScope(EVI->getAggregateOperand()) &&
4703                "Expected aggregate value to be loop invariant");
4704         addToWorklistIfAllowed(EVI);
4705         continue;
4706       }
4707 
4708       // If there's no pointer operand, there's nothing to do.
4709       auto *Ptr = getLoadStorePointerOperand(&I);
4710       if (!Ptr)
4711         continue;
4712 
4713       // A uniform memory op is itself uniform.  We exclude uniform stores
4714       // here as they demand the last lane, not the first one.
4715       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
4716         addToWorklistIfAllowed(&I);
4717 
4718       if (isUniformDecision(&I, VF)) {
4719         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
4720         HasUniformUse.insert(Ptr);
4721       }
4722     }
4723 
4724   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
4725   // demanding) users.  Since loops are assumed to be in LCSSA form, this
4726   // disallows uses outside the loop as well.
4727   for (auto *V : HasUniformUse) {
4728     if (isOutOfScope(V))
4729       continue;
4730     auto *I = cast<Instruction>(V);
4731     auto UsersAreMemAccesses =
4732       llvm::all_of(I->users(), [&](User *U) -> bool {
4733         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
4734       });
4735     if (UsersAreMemAccesses)
4736       addToWorklistIfAllowed(I);
4737   }
4738 
4739   // Expand Worklist in topological order: whenever a new instruction
4740   // is added , its users should be already inside Worklist.  It ensures
4741   // a uniform instruction will only be used by uniform instructions.
4742   unsigned idx = 0;
4743   while (idx != Worklist.size()) {
4744     Instruction *I = Worklist[idx++];
4745 
4746     for (auto OV : I->operand_values()) {
4747       // isOutOfScope operands cannot be uniform instructions.
4748       if (isOutOfScope(OV))
4749         continue;
4750       // First order recurrence Phi's should typically be considered
4751       // non-uniform.
4752       auto *OP = dyn_cast<PHINode>(OV);
4753       if (OP && Legal->isFirstOrderRecurrence(OP))
4754         continue;
4755       // If all the users of the operand are uniform, then add the
4756       // operand into the uniform worklist.
4757       auto *OI = cast<Instruction>(OV);
4758       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
4759             auto *J = cast<Instruction>(U);
4760             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
4761           }))
4762         addToWorklistIfAllowed(OI);
4763     }
4764   }
4765 
4766   // For an instruction to be added into Worklist above, all its users inside
4767   // the loop should also be in Worklist. However, this condition cannot be
4768   // true for phi nodes that form a cyclic dependence. We must process phi
4769   // nodes separately. An induction variable will remain uniform if all users
4770   // of the induction variable and induction variable update remain uniform.
4771   // The code below handles both pointer and non-pointer induction variables.
4772   for (auto &Induction : Legal->getInductionVars()) {
4773     auto *Ind = Induction.first;
4774     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4775 
4776     // Determine if all users of the induction variable are uniform after
4777     // vectorization.
4778     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4779       auto *I = cast<Instruction>(U);
4780       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4781              isVectorizedMemAccessUse(I, Ind);
4782     });
4783     if (!UniformInd)
4784       continue;
4785 
4786     // Determine if all users of the induction variable update instruction are
4787     // uniform after vectorization.
4788     auto UniformIndUpdate =
4789         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4790           auto *I = cast<Instruction>(U);
4791           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4792                  isVectorizedMemAccessUse(I, IndUpdate);
4793         });
4794     if (!UniformIndUpdate)
4795       continue;
4796 
4797     // The induction variable and its update instruction will remain uniform.
4798     addToWorklistIfAllowed(Ind);
4799     addToWorklistIfAllowed(IndUpdate);
4800   }
4801 
4802   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
4803 }
4804 
4805 bool LoopVectorizationCostModel::runtimeChecksRequired() {
4806   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
4807 
4808   if (Legal->getRuntimePointerChecking()->Need) {
4809     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
4810         "runtime pointer checks needed. Enable vectorization of this "
4811         "loop with '#pragma clang loop vectorize(enable)' when "
4812         "compiling with -Os/-Oz",
4813         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4814     return true;
4815   }
4816 
4817   if (!PSE.getPredicate().isAlwaysTrue()) {
4818     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
4819         "runtime SCEV checks needed. Enable vectorization of this "
4820         "loop with '#pragma clang loop vectorize(enable)' when "
4821         "compiling with -Os/-Oz",
4822         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4823     return true;
4824   }
4825 
4826   // FIXME: Avoid specializing for stride==1 instead of bailing out.
4827   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
4828     reportVectorizationFailure("Runtime stride check for small trip count",
4829         "runtime stride == 1 checks needed. Enable vectorization of "
4830         "this loop without such check by compiling with -Os/-Oz",
4831         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4832     return true;
4833   }
4834 
4835   return false;
4836 }
4837 
4838 ElementCount
4839 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
4840   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
4841     return ElementCount::getScalable(0);
4842 
4843   if (Hints->isScalableVectorizationDisabled()) {
4844     reportVectorizationInfo("Scalable vectorization is explicitly disabled",
4845                             "ScalableVectorizationDisabled", ORE, TheLoop);
4846     return ElementCount::getScalable(0);
4847   }
4848 
4849   LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
4850 
4851   auto MaxScalableVF = ElementCount::getScalable(
4852       std::numeric_limits<ElementCount::ScalarTy>::max());
4853 
4854   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
4855   // FIXME: While for scalable vectors this is currently sufficient, this should
4856   // be replaced by a more detailed mechanism that filters out specific VFs,
4857   // instead of invalidating vectorization for a whole set of VFs based on the
4858   // MaxVF.
4859 
4860   // Disable scalable vectorization if the loop contains unsupported reductions.
4861   if (!canVectorizeReductions(MaxScalableVF)) {
4862     reportVectorizationInfo(
4863         "Scalable vectorization not supported for the reduction "
4864         "operations found in this loop.",
4865         "ScalableVFUnfeasible", ORE, TheLoop);
4866     return ElementCount::getScalable(0);
4867   }
4868 
4869   // Disable scalable vectorization if the loop contains any instructions
4870   // with element types not supported for scalable vectors.
4871   if (any_of(ElementTypesInLoop, [&](Type *Ty) {
4872         return !Ty->isVoidTy() &&
4873                !this->TTI.isElementTypeLegalForScalableVector(Ty);
4874       })) {
4875     reportVectorizationInfo("Scalable vectorization is not supported "
4876                             "for all element types found in this loop.",
4877                             "ScalableVFUnfeasible", ORE, TheLoop);
4878     return ElementCount::getScalable(0);
4879   }
4880 
4881   if (Legal->isSafeForAnyVectorWidth())
4882     return MaxScalableVF;
4883 
4884   // Limit MaxScalableVF by the maximum safe dependence distance.
4885   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
4886   if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
4887     MaxVScale =
4888         TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
4889   MaxScalableVF = ElementCount::getScalable(
4890       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
4891   if (!MaxScalableVF)
4892     reportVectorizationInfo(
4893         "Max legal vector width too small, scalable vectorization "
4894         "unfeasible.",
4895         "ScalableVFUnfeasible", ORE, TheLoop);
4896 
4897   return MaxScalableVF;
4898 }
4899 
4900 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
4901     unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) {
4902   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
4903   unsigned SmallestType, WidestType;
4904   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
4905 
4906   // Get the maximum safe dependence distance in bits computed by LAA.
4907   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
4908   // the memory accesses that is most restrictive (involved in the smallest
4909   // dependence distance).
4910   unsigned MaxSafeElements =
4911       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
4912 
4913   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
4914   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
4915 
4916   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
4917                     << ".\n");
4918   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
4919                     << ".\n");
4920 
4921   // First analyze the UserVF, fall back if the UserVF should be ignored.
4922   if (UserVF) {
4923     auto MaxSafeUserVF =
4924         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
4925 
4926     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
4927       // If `VF=vscale x N` is safe, then so is `VF=N`
4928       if (UserVF.isScalable())
4929         return FixedScalableVFPair(
4930             ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
4931       else
4932         return UserVF;
4933     }
4934 
4935     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
4936 
4937     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
4938     // is better to ignore the hint and let the compiler choose a suitable VF.
4939     if (!UserVF.isScalable()) {
4940       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
4941                         << " is unsafe, clamping to max safe VF="
4942                         << MaxSafeFixedVF << ".\n");
4943       ORE->emit([&]() {
4944         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
4945                                           TheLoop->getStartLoc(),
4946                                           TheLoop->getHeader())
4947                << "User-specified vectorization factor "
4948                << ore::NV("UserVectorizationFactor", UserVF)
4949                << " is unsafe, clamping to maximum safe vectorization factor "
4950                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
4951       });
4952       return MaxSafeFixedVF;
4953     }
4954 
4955     if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
4956       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
4957                         << " is ignored because scalable vectors are not "
4958                            "available.\n");
4959       ORE->emit([&]() {
4960         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
4961                                           TheLoop->getStartLoc(),
4962                                           TheLoop->getHeader())
4963                << "User-specified vectorization factor "
4964                << ore::NV("UserVectorizationFactor", UserVF)
4965                << " is ignored because the target does not support scalable "
4966                   "vectors. The compiler will pick a more suitable value.";
4967       });
4968     } else {
4969       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
4970                         << " is unsafe. Ignoring scalable UserVF.\n");
4971       ORE->emit([&]() {
4972         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
4973                                           TheLoop->getStartLoc(),
4974                                           TheLoop->getHeader())
4975                << "User-specified vectorization factor "
4976                << ore::NV("UserVectorizationFactor", UserVF)
4977                << " is unsafe. Ignoring the hint to let the compiler pick a "
4978                   "more suitable value.";
4979       });
4980     }
4981   }
4982 
4983   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
4984                     << " / " << WidestType << " bits.\n");
4985 
4986   FixedScalableVFPair Result(ElementCount::getFixed(1),
4987                              ElementCount::getScalable(0));
4988   if (auto MaxVF =
4989           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
4990                                   MaxSafeFixedVF, FoldTailByMasking))
4991     Result.FixedVF = MaxVF;
4992 
4993   if (auto MaxVF =
4994           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
4995                                   MaxSafeScalableVF, FoldTailByMasking))
4996     if (MaxVF.isScalable()) {
4997       Result.ScalableVF = MaxVF;
4998       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
4999                         << "\n");
5000     }
5001 
5002   return Result;
5003 }
5004 
5005 FixedScalableVFPair
5006 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5007   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
5008     // TODO: It may by useful to do since it's still likely to be dynamically
5009     // uniform if the target can skip.
5010     reportVectorizationFailure(
5011         "Not inserting runtime ptr check for divergent target",
5012         "runtime pointer checks needed. Not enabled for divergent target",
5013         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5014     return FixedScalableVFPair::getNone();
5015   }
5016 
5017   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5018   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5019   if (TC == 1) {
5020     reportVectorizationFailure("Single iteration (non) loop",
5021         "loop trip count is one, irrelevant for vectorization",
5022         "SingleIterationLoop", ORE, TheLoop);
5023     return FixedScalableVFPair::getNone();
5024   }
5025 
5026   switch (ScalarEpilogueStatus) {
5027   case CM_ScalarEpilogueAllowed:
5028     return computeFeasibleMaxVF(TC, UserVF, false);
5029   case CM_ScalarEpilogueNotAllowedUsePredicate:
5030     LLVM_FALLTHROUGH;
5031   case CM_ScalarEpilogueNotNeededUsePredicate:
5032     LLVM_DEBUG(
5033         dbgs() << "LV: vector predicate hint/switch found.\n"
5034                << "LV: Not allowing scalar epilogue, creating predicated "
5035                << "vector loop.\n");
5036     break;
5037   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5038     // fallthrough as a special case of OptForSize
5039   case CM_ScalarEpilogueNotAllowedOptSize:
5040     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5041       LLVM_DEBUG(
5042           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5043     else
5044       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5045                         << "count.\n");
5046 
5047     // Bail if runtime checks are required, which are not good when optimising
5048     // for size.
5049     if (runtimeChecksRequired())
5050       return FixedScalableVFPair::getNone();
5051 
5052     break;
5053   }
5054 
5055   // The only loops we can vectorize without a scalar epilogue, are loops with
5056   // a bottom-test and a single exiting block. We'd have to handle the fact
5057   // that not every instruction executes on the last iteration.  This will
5058   // require a lane mask which varies through the vector loop body.  (TODO)
5059   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5060     // If there was a tail-folding hint/switch, but we can't fold the tail by
5061     // masking, fallback to a vectorization with a scalar epilogue.
5062     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5063       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5064                            "scalar epilogue instead.\n");
5065       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5066       return computeFeasibleMaxVF(TC, UserVF, false);
5067     }
5068     return FixedScalableVFPair::getNone();
5069   }
5070 
5071   // Now try the tail folding
5072 
5073   // Invalidate interleave groups that require an epilogue if we can't mask
5074   // the interleave-group.
5075   if (!useMaskedInterleavedAccesses(TTI)) {
5076     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5077            "No decisions should have been taken at this point");
5078     // Note: There is no need to invalidate any cost modeling decisions here, as
5079     // non where taken so far.
5080     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5081   }
5082 
5083   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
5084   // Avoid tail folding if the trip count is known to be a multiple of any VF
5085   // we chose.
5086   // FIXME: The condition below pessimises the case for fixed-width vectors,
5087   // when scalable VFs are also candidates for vectorization.
5088   if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5089     ElementCount MaxFixedVF = MaxFactors.FixedVF;
5090     assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5091            "MaxFixedVF must be a power of 2");
5092     unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5093                                    : MaxFixedVF.getFixedValue();
5094     ScalarEvolution *SE = PSE.getSE();
5095     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5096     const SCEV *ExitCount = SE->getAddExpr(
5097         BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5098     const SCEV *Rem = SE->getURemExpr(
5099         SE->applyLoopGuards(ExitCount, TheLoop),
5100         SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5101     if (Rem->isZero()) {
5102       // Accept MaxFixedVF if we do not have a tail.
5103       LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5104       return MaxFactors;
5105     }
5106   }
5107 
5108   // For scalable vectors don't use tail folding for low trip counts or
5109   // optimizing for code size. We only permit this if the user has explicitly
5110   // requested it.
5111   if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate &&
5112       ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate &&
5113       MaxFactors.ScalableVF.isVector())
5114     MaxFactors.ScalableVF = ElementCount::getScalable(0);
5115 
5116   // If we don't know the precise trip count, or if the trip count that we
5117   // found modulo the vectorization factor is not zero, try to fold the tail
5118   // by masking.
5119   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5120   if (Legal->prepareToFoldTailByMasking()) {
5121     FoldTailByMasking = true;
5122     return MaxFactors;
5123   }
5124 
5125   // If there was a tail-folding hint/switch, but we can't fold the tail by
5126   // masking, fallback to a vectorization with a scalar epilogue.
5127   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5128     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5129                          "scalar epilogue instead.\n");
5130     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5131     return MaxFactors;
5132   }
5133 
5134   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5135     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5136     return FixedScalableVFPair::getNone();
5137   }
5138 
5139   if (TC == 0) {
5140     reportVectorizationFailure(
5141         "Unable to calculate the loop count due to complex control flow",
5142         "unable to calculate the loop count due to complex control flow",
5143         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5144     return FixedScalableVFPair::getNone();
5145   }
5146 
5147   reportVectorizationFailure(
5148       "Cannot optimize for size and vectorize at the same time.",
5149       "cannot optimize for size and vectorize at the same time. "
5150       "Enable vectorization of this loop with '#pragma clang loop "
5151       "vectorize(enable)' when compiling with -Os/-Oz",
5152       "NoTailLoopWithOptForSize", ORE, TheLoop);
5153   return FixedScalableVFPair::getNone();
5154 }
5155 
5156 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5157     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5158     const ElementCount &MaxSafeVF, bool FoldTailByMasking) {
5159   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5160   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5161       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5162                            : TargetTransformInfo::RGK_FixedWidthVector);
5163 
5164   // Convenience function to return the minimum of two ElementCounts.
5165   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5166     assert((LHS.isScalable() == RHS.isScalable()) &&
5167            "Scalable flags must match");
5168     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5169   };
5170 
5171   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
5172   // Note that both WidestRegister and WidestType may not be a powers of 2.
5173   auto MaxVectorElementCount = ElementCount::get(
5174       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5175       ComputeScalableMaxVF);
5176   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5177   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5178                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5179 
5180   if (!MaxVectorElementCount) {
5181     LLVM_DEBUG(dbgs() << "LV: The target has no "
5182                       << (ComputeScalableMaxVF ? "scalable" : "fixed")
5183                       << " vector registers.\n");
5184     return ElementCount::getFixed(1);
5185   }
5186 
5187   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5188   if (ConstTripCount &&
5189       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5190       (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) {
5191     // If loop trip count (TC) is known at compile time there is no point in
5192     // choosing VF greater than TC (as done in the loop below). Select maximum
5193     // power of two which doesn't exceed TC.
5194     // If MaxVectorElementCount is scalable, we only fall back on a fixed VF
5195     // when the TC is less than or equal to the known number of lanes.
5196     auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount);
5197     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
5198                          "exceeding the constant trip count: "
5199                       << ClampedConstTripCount << "\n");
5200     return ElementCount::getFixed(ClampedConstTripCount);
5201   }
5202 
5203   ElementCount MaxVF = MaxVectorElementCount;
5204   if (MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 &&
5205                             TTI.shouldMaximizeVectorBandwidth())) {
5206     auto MaxVectorElementCountMaxBW = ElementCount::get(
5207         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5208         ComputeScalableMaxVF);
5209     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5210 
5211     // Collect all viable vectorization factors larger than the default MaxVF
5212     // (i.e. MaxVectorElementCount).
5213     SmallVector<ElementCount, 8> VFs;
5214     for (ElementCount VS = MaxVectorElementCount * 2;
5215          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5216       VFs.push_back(VS);
5217 
5218     // For each VF calculate its register usage.
5219     auto RUs = calculateRegisterUsage(VFs);
5220 
5221     // Select the largest VF which doesn't require more registers than existing
5222     // ones.
5223     for (int i = RUs.size() - 1; i >= 0; --i) {
5224       bool Selected = true;
5225       for (auto &pair : RUs[i].MaxLocalUsers) {
5226         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5227         if (pair.second > TargetNumRegisters)
5228           Selected = false;
5229       }
5230       if (Selected) {
5231         MaxVF = VFs[i];
5232         break;
5233       }
5234     }
5235     if (ElementCount MinVF =
5236             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
5237       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5238         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5239                           << ") with target's minimum: " << MinVF << '\n');
5240         MaxVF = MinVF;
5241       }
5242     }
5243 
5244     // Invalidate any widening decisions we might have made, in case the loop
5245     // requires prediction (decided later), but we have already made some
5246     // load/store widening decisions.
5247     invalidateCostModelingDecisions();
5248   }
5249   return MaxVF;
5250 }
5251 
5252 Optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const {
5253   if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
5254     auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
5255     auto Min = Attr.getVScaleRangeMin();
5256     auto Max = Attr.getVScaleRangeMax();
5257     if (Max && Min == Max)
5258       return Max;
5259   }
5260 
5261   return TTI.getVScaleForTuning();
5262 }
5263 
5264 bool LoopVectorizationCostModel::isMoreProfitable(
5265     const VectorizationFactor &A, const VectorizationFactor &B) const {
5266   InstructionCost CostA = A.Cost;
5267   InstructionCost CostB = B.Cost;
5268 
5269   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
5270 
5271   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
5272       MaxTripCount) {
5273     // If we are folding the tail and the trip count is a known (possibly small)
5274     // constant, the trip count will be rounded up to an integer number of
5275     // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF),
5276     // which we compare directly. When not folding the tail, the total cost will
5277     // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is
5278     // approximated with the per-lane cost below instead of using the tripcount
5279     // as here.
5280     auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
5281     auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
5282     return RTCostA < RTCostB;
5283   }
5284 
5285   // Improve estimate for the vector width if it is scalable.
5286   unsigned EstimatedWidthA = A.Width.getKnownMinValue();
5287   unsigned EstimatedWidthB = B.Width.getKnownMinValue();
5288   if (Optional<unsigned> VScale = getVScaleForTuning()) {
5289     if (A.Width.isScalable())
5290       EstimatedWidthA *= VScale.getValue();
5291     if (B.Width.isScalable())
5292       EstimatedWidthB *= VScale.getValue();
5293   }
5294 
5295   // Assume vscale may be larger than 1 (or the value being tuned for),
5296   // so that scalable vectorization is slightly favorable over fixed-width
5297   // vectorization.
5298   if (A.Width.isScalable() && !B.Width.isScalable())
5299     return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);
5300 
5301   // To avoid the need for FP division:
5302   //      (CostA / A.Width) < (CostB / B.Width)
5303   // <=>  (CostA * B.Width) < (CostB * A.Width)
5304   return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
5305 }
5306 
5307 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
5308     const ElementCountSet &VFCandidates) {
5309   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5310   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5311   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5312   assert(VFCandidates.count(ElementCount::getFixed(1)) &&
5313          "Expected Scalar VF to be a candidate");
5314 
5315   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
5316   VectorizationFactor ChosenFactor = ScalarCost;
5317 
5318   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5319   if (ForceVectorization && VFCandidates.size() > 1) {
5320     // Ignore scalar width, because the user explicitly wants vectorization.
5321     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5322     // evaluation.
5323     ChosenFactor.Cost = InstructionCost::getMax();
5324   }
5325 
5326   SmallVector<InstructionVFPair> InvalidCosts;
5327   for (const auto &i : VFCandidates) {
5328     // The cost for scalar VF=1 is already calculated, so ignore it.
5329     if (i.isScalar())
5330       continue;
5331 
5332     VectorizationCostTy C = expectedCost(i, &InvalidCosts);
5333     VectorizationFactor Candidate(i, C.first);
5334 
5335 #ifndef NDEBUG
5336     unsigned AssumedMinimumVscale = 1;
5337     if (Optional<unsigned> VScale = getVScaleForTuning())
5338       AssumedMinimumVscale = VScale.getValue();
5339     unsigned Width =
5340         Candidate.Width.isScalable()
5341             ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
5342             : Candidate.Width.getFixedValue();
5343     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5344                       << " costs: " << (Candidate.Cost / Width));
5345     if (i.isScalable())
5346       LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
5347                         << AssumedMinimumVscale << ")");
5348     LLVM_DEBUG(dbgs() << ".\n");
5349 #endif
5350 
5351     if (!C.second && !ForceVectorization) {
5352       LLVM_DEBUG(
5353           dbgs() << "LV: Not considering vector loop of width " << i
5354                  << " because it will not generate any vector instructions.\n");
5355       continue;
5356     }
5357 
5358     // If profitable add it to ProfitableVF list.
5359     if (isMoreProfitable(Candidate, ScalarCost))
5360       ProfitableVFs.push_back(Candidate);
5361 
5362     if (isMoreProfitable(Candidate, ChosenFactor))
5363       ChosenFactor = Candidate;
5364   }
5365 
5366   // Emit a report of VFs with invalid costs in the loop.
5367   if (!InvalidCosts.empty()) {
5368     // Group the remarks per instruction, keeping the instruction order from
5369     // InvalidCosts.
5370     std::map<Instruction *, unsigned> Numbering;
5371     unsigned I = 0;
5372     for (auto &Pair : InvalidCosts)
5373       if (!Numbering.count(Pair.first))
5374         Numbering[Pair.first] = I++;
5375 
5376     // Sort the list, first on instruction(number) then on VF.
5377     llvm::sort(InvalidCosts,
5378                [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
5379                  if (Numbering[A.first] != Numbering[B.first])
5380                    return Numbering[A.first] < Numbering[B.first];
5381                  ElementCountComparator ECC;
5382                  return ECC(A.second, B.second);
5383                });
5384 
5385     // For a list of ordered instruction-vf pairs:
5386     //   [(load, vf1), (load, vf2), (store, vf1)]
5387     // Group the instructions together to emit separate remarks for:
5388     //   load  (vf1, vf2)
5389     //   store (vf1)
5390     auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
5391     auto Subset = ArrayRef<InstructionVFPair>();
5392     do {
5393       if (Subset.empty())
5394         Subset = Tail.take_front(1);
5395 
5396       Instruction *I = Subset.front().first;
5397 
5398       // If the next instruction is different, or if there are no other pairs,
5399       // emit a remark for the collated subset. e.g.
5400       //   [(load, vf1), (load, vf2))]
5401       // to emit:
5402       //  remark: invalid costs for 'load' at VF=(vf, vf2)
5403       if (Subset == Tail || Tail[Subset.size()].first != I) {
5404         std::string OutString;
5405         raw_string_ostream OS(OutString);
5406         assert(!Subset.empty() && "Unexpected empty range");
5407         OS << "Instruction with invalid costs prevented vectorization at VF=(";
5408         for (auto &Pair : Subset)
5409           OS << (Pair.second == Subset.front().second ? "" : ", ")
5410              << Pair.second;
5411         OS << "):";
5412         if (auto *CI = dyn_cast<CallInst>(I))
5413           OS << " call to " << CI->getCalledFunction()->getName();
5414         else
5415           OS << " " << I->getOpcodeName();
5416         OS.flush();
5417         reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
5418         Tail = Tail.drop_front(Subset.size());
5419         Subset = {};
5420       } else
5421         // Grow the subset by one element
5422         Subset = Tail.take_front(Subset.size() + 1);
5423     } while (!Tail.empty());
5424   }
5425 
5426   if (!EnableCondStoresVectorization && NumPredStores) {
5427     reportVectorizationFailure("There are conditional stores.",
5428         "store that is conditionally executed prevents vectorization",
5429         "ConditionalStore", ORE, TheLoop);
5430     ChosenFactor = ScalarCost;
5431   }
5432 
5433   LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
5434                  ChosenFactor.Cost >= ScalarCost.Cost) dbgs()
5435              << "LV: Vectorization seems to be not beneficial, "
5436              << "but was forced by a user.\n");
5437   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
5438   return ChosenFactor;
5439 }
5440 
5441 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5442     const Loop &L, ElementCount VF) const {
5443   // Cross iteration phis such as reductions need special handling and are
5444   // currently unsupported.
5445   if (any_of(L.getHeader()->phis(),
5446              [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); }))
5447     return false;
5448 
5449   // Phis with uses outside of the loop require special handling and are
5450   // currently unsupported.
5451   for (auto &Entry : Legal->getInductionVars()) {
5452     // Look for uses of the value of the induction at the last iteration.
5453     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5454     for (User *U : PostInc->users())
5455       if (!L.contains(cast<Instruction>(U)))
5456         return false;
5457     // Look for uses of penultimate value of the induction.
5458     for (User *U : Entry.first->users())
5459       if (!L.contains(cast<Instruction>(U)))
5460         return false;
5461   }
5462 
5463   // Induction variables that are widened require special handling that is
5464   // currently not supported.
5465   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5466         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5467                  this->isProfitableToScalarize(Entry.first, VF));
5468       }))
5469     return false;
5470 
5471   // Epilogue vectorization code has not been auditted to ensure it handles
5472   // non-latch exits properly.  It may be fine, but it needs auditted and
5473   // tested.
5474   if (L.getExitingBlock() != L.getLoopLatch())
5475     return false;
5476 
5477   return true;
5478 }
5479 
5480 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5481     const ElementCount VF) const {
5482   // FIXME: We need a much better cost-model to take different parameters such
5483   // as register pressure, code size increase and cost of extra branches into
5484   // account. For now we apply a very crude heuristic and only consider loops
5485   // with vectorization factors larger than a certain value.
5486   // We also consider epilogue vectorization unprofitable for targets that don't
5487   // consider interleaving beneficial (eg. MVE).
5488   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5489     return false;
5490   // FIXME: We should consider changing the threshold for scalable
5491   // vectors to take VScaleForTuning into account.
5492   if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF)
5493     return true;
5494   return false;
5495 }
5496 
5497 VectorizationFactor
5498 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5499     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5500   VectorizationFactor Result = VectorizationFactor::Disabled();
5501   if (!EnableEpilogueVectorization) {
5502     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5503     return Result;
5504   }
5505 
5506   if (!isScalarEpilogueAllowed()) {
5507     LLVM_DEBUG(
5508         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5509                   "allowed.\n";);
5510     return Result;
5511   }
5512 
5513   // Not really a cost consideration, but check for unsupported cases here to
5514   // simplify the logic.
5515   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5516     LLVM_DEBUG(
5517         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5518                   "not a supported candidate.\n";);
5519     return Result;
5520   }
5521 
5522   if (EpilogueVectorizationForceVF > 1) {
5523     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
5524     ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
5525     if (LVP.hasPlanWithVF(ForcedEC))
5526       return {ForcedEC, 0};
5527     else {
5528       LLVM_DEBUG(
5529           dbgs()
5530               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5531       return Result;
5532     }
5533   }
5534 
5535   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5536       TheLoop->getHeader()->getParent()->hasMinSize()) {
5537     LLVM_DEBUG(
5538         dbgs()
5539             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
5540     return Result;
5541   }
5542 
5543   if (!isEpilogueVectorizationProfitable(MainLoopVF)) {
5544     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
5545                          "this loop\n");
5546     return Result;
5547   }
5548 
5549   // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
5550   // the main loop handles 8 lanes per iteration. We could still benefit from
5551   // vectorizing the epilogue loop with VF=4.
5552   ElementCount EstimatedRuntimeVF = MainLoopVF;
5553   if (MainLoopVF.isScalable()) {
5554     EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
5555     if (Optional<unsigned> VScale = getVScaleForTuning())
5556       EstimatedRuntimeVF *= VScale.getValue();
5557   }
5558 
5559   for (auto &NextVF : ProfitableVFs)
5560     if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() &&
5561           ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) ||
5562          ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) &&
5563         (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) &&
5564         LVP.hasPlanWithVF(NextVF.Width))
5565       Result = NextVF;
5566 
5567   if (Result != VectorizationFactor::Disabled())
5568     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
5569                       << Result.Width << "\n";);
5570   return Result;
5571 }
5572 
5573 std::pair<unsigned, unsigned>
5574 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5575   unsigned MinWidth = -1U;
5576   unsigned MaxWidth = 8;
5577   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5578   // For in-loop reductions, no element types are added to ElementTypesInLoop
5579   // if there are no loads/stores in the loop. In this case, check through the
5580   // reduction variables to determine the maximum width.
5581   if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
5582     // Reset MaxWidth so that we can find the smallest type used by recurrences
5583     // in the loop.
5584     MaxWidth = -1U;
5585     for (auto &PhiDescriptorPair : Legal->getReductionVars()) {
5586       const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
5587       // When finding the min width used by the recurrence we need to account
5588       // for casts on the input operands of the recurrence.
5589       MaxWidth = std::min<unsigned>(
5590           MaxWidth, std::min<unsigned>(
5591                         RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
5592                         RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
5593     }
5594   } else {
5595     for (Type *T : ElementTypesInLoop) {
5596       MinWidth = std::min<unsigned>(
5597           MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5598       MaxWidth = std::max<unsigned>(
5599           MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5600     }
5601   }
5602   return {MinWidth, MaxWidth};
5603 }
5604 
5605 void LoopVectorizationCostModel::collectElementTypesForWidening() {
5606   ElementTypesInLoop.clear();
5607   // For each block.
5608   for (BasicBlock *BB : TheLoop->blocks()) {
5609     // For each instruction in the loop.
5610     for (Instruction &I : BB->instructionsWithoutDebug()) {
5611       Type *T = I.getType();
5612 
5613       // Skip ignored values.
5614       if (ValuesToIgnore.count(&I))
5615         continue;
5616 
5617       // Only examine Loads, Stores and PHINodes.
5618       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5619         continue;
5620 
5621       // Examine PHI nodes that are reduction variables. Update the type to
5622       // account for the recurrence type.
5623       if (auto *PN = dyn_cast<PHINode>(&I)) {
5624         if (!Legal->isReductionVariable(PN))
5625           continue;
5626         const RecurrenceDescriptor &RdxDesc =
5627             Legal->getReductionVars().find(PN)->second;
5628         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
5629             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
5630                                       RdxDesc.getRecurrenceType(),
5631                                       TargetTransformInfo::ReductionFlags()))
5632           continue;
5633         T = RdxDesc.getRecurrenceType();
5634       }
5635 
5636       // Examine the stored values.
5637       if (auto *ST = dyn_cast<StoreInst>(&I))
5638         T = ST->getValueOperand()->getType();
5639 
5640       assert(T->isSized() &&
5641              "Expected the load/store/recurrence type to be sized");
5642 
5643       ElementTypesInLoop.insert(T);
5644     }
5645   }
5646 }
5647 
5648 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
5649                                                            unsigned LoopCost) {
5650   // -- The interleave heuristics --
5651   // We interleave the loop in order to expose ILP and reduce the loop overhead.
5652   // There are many micro-architectural considerations that we can't predict
5653   // at this level. For example, frontend pressure (on decode or fetch) due to
5654   // code size, or the number and capabilities of the execution ports.
5655   //
5656   // We use the following heuristics to select the interleave count:
5657   // 1. If the code has reductions, then we interleave to break the cross
5658   // iteration dependency.
5659   // 2. If the loop is really small, then we interleave to reduce the loop
5660   // overhead.
5661   // 3. We don't interleave if we think that we will spill registers to memory
5662   // due to the increased register pressure.
5663 
5664   if (!isScalarEpilogueAllowed())
5665     return 1;
5666 
5667   // We used the distance for the interleave count.
5668   if (Legal->getMaxSafeDepDistBytes() != -1U)
5669     return 1;
5670 
5671   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
5672   const bool HasReductions = !Legal->getReductionVars().empty();
5673   // Do not interleave loops with a relatively small known or estimated trip
5674   // count. But we will interleave when InterleaveSmallLoopScalarReduction is
5675   // enabled, and the code has scalar reductions(HasReductions && VF = 1),
5676   // because with the above conditions interleaving can expose ILP and break
5677   // cross iteration dependences for reductions.
5678   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
5679       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
5680     return 1;
5681 
5682   // If we did not calculate the cost for VF (because the user selected the VF)
5683   // then we calculate the cost of VF here.
5684   if (LoopCost == 0) {
5685     InstructionCost C = expectedCost(VF).first;
5686     assert(C.isValid() && "Expected to have chosen a VF with valid cost");
5687     LoopCost = *C.getValue();
5688 
5689     // Loop body is free and there is no need for interleaving.
5690     if (LoopCost == 0)
5691       return 1;
5692   }
5693 
5694   RegisterUsage R = calculateRegisterUsage({VF})[0];
5695   // We divide by these constants so assume that we have at least one
5696   // instruction that uses at least one register.
5697   for (auto& pair : R.MaxLocalUsers) {
5698     pair.second = std::max(pair.second, 1U);
5699   }
5700 
5701   // We calculate the interleave count using the following formula.
5702   // Subtract the number of loop invariants from the number of available
5703   // registers. These registers are used by all of the interleaved instances.
5704   // Next, divide the remaining registers by the number of registers that is
5705   // required by the loop, in order to estimate how many parallel instances
5706   // fit without causing spills. All of this is rounded down if necessary to be
5707   // a power of two. We want power of two interleave count to simplify any
5708   // addressing operations or alignment considerations.
5709   // We also want power of two interleave counts to ensure that the induction
5710   // variable of the vector loop wraps to zero, when tail is folded by masking;
5711   // this currently happens when OptForSize, in which case IC is set to 1 above.
5712   unsigned IC = UINT_MAX;
5713 
5714   for (auto& pair : R.MaxLocalUsers) {
5715     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5716     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
5717                       << " registers of "
5718                       << TTI.getRegisterClassName(pair.first) << " register class\n");
5719     if (VF.isScalar()) {
5720       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5721         TargetNumRegisters = ForceTargetNumScalarRegs;
5722     } else {
5723       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5724         TargetNumRegisters = ForceTargetNumVectorRegs;
5725     }
5726     unsigned MaxLocalUsers = pair.second;
5727     unsigned LoopInvariantRegs = 0;
5728     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
5729       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
5730 
5731     unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
5732     // Don't count the induction variable as interleaved.
5733     if (EnableIndVarRegisterHeur) {
5734       TmpIC =
5735           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
5736                         std::max(1U, (MaxLocalUsers - 1)));
5737     }
5738 
5739     IC = std::min(IC, TmpIC);
5740   }
5741 
5742   // Clamp the interleave ranges to reasonable counts.
5743   unsigned MaxInterleaveCount =
5744       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
5745 
5746   // Check if the user has overridden the max.
5747   if (VF.isScalar()) {
5748     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5749       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5750   } else {
5751     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5752       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5753   }
5754 
5755   // If trip count is known or estimated compile time constant, limit the
5756   // interleave count to be less than the trip count divided by VF, provided it
5757   // is at least 1.
5758   //
5759   // For scalable vectors we can't know if interleaving is beneficial. It may
5760   // not be beneficial for small loops if none of the lanes in the second vector
5761   // iterations is enabled. However, for larger loops, there is likely to be a
5762   // similar benefit as for fixed-width vectors. For now, we choose to leave
5763   // the InterleaveCount as if vscale is '1', although if some information about
5764   // the vector is known (e.g. min vector size), we can make a better decision.
5765   if (BestKnownTC) {
5766     MaxInterleaveCount =
5767         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
5768     // Make sure MaxInterleaveCount is greater than 0.
5769     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
5770   }
5771 
5772   assert(MaxInterleaveCount > 0 &&
5773          "Maximum interleave count must be greater than 0");
5774 
5775   // Clamp the calculated IC to be between the 1 and the max interleave count
5776   // that the target and trip count allows.
5777   if (IC > MaxInterleaveCount)
5778     IC = MaxInterleaveCount;
5779   else
5780     // Make sure IC is greater than 0.
5781     IC = std::max(1u, IC);
5782 
5783   assert(IC > 0 && "Interleave count must be greater than 0.");
5784 
5785   // Interleave if we vectorized this loop and there is a reduction that could
5786   // benefit from interleaving.
5787   if (VF.isVector() && HasReductions) {
5788     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5789     return IC;
5790   }
5791 
5792   // For any scalar loop that either requires runtime checks or predication we
5793   // are better off leaving this to the unroller. Note that if we've already
5794   // vectorized the loop we will have done the runtime check and so interleaving
5795   // won't require further checks.
5796   bool ScalarInterleavingRequiresPredication =
5797       (VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) {
5798          return Legal->blockNeedsPredication(BB);
5799        }));
5800   bool ScalarInterleavingRequiresRuntimePointerCheck =
5801       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
5802 
5803   // We want to interleave small loops in order to reduce the loop overhead and
5804   // potentially expose ILP opportunities.
5805   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
5806                     << "LV: IC is " << IC << '\n'
5807                     << "LV: VF is " << VF << '\n');
5808   const bool AggressivelyInterleaveReductions =
5809       TTI.enableAggressiveInterleaving(HasReductions);
5810   if (!ScalarInterleavingRequiresRuntimePointerCheck &&
5811       !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
5812     // We assume that the cost overhead is 1 and we use the cost model
5813     // to estimate the cost of the loop and interleave until the cost of the
5814     // loop overhead is about 5% of the cost of the loop.
5815     unsigned SmallIC =
5816         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
5817 
5818     // Interleave until store/load ports (estimated by max interleave count) are
5819     // saturated.
5820     unsigned NumStores = Legal->getNumStores();
5821     unsigned NumLoads = Legal->getNumLoads();
5822     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5823     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5824 
5825     // There is little point in interleaving for reductions containing selects
5826     // and compares when VF=1 since it may just create more overhead than it's
5827     // worth for loops with small trip counts. This is because we still have to
5828     // do the final reduction after the loop.
5829     bool HasSelectCmpReductions =
5830         HasReductions &&
5831         any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
5832           const RecurrenceDescriptor &RdxDesc = Reduction.second;
5833           return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
5834               RdxDesc.getRecurrenceKind());
5835         });
5836     if (HasSelectCmpReductions) {
5837       LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
5838       return 1;
5839     }
5840 
5841     // If we have a scalar reduction (vector reductions are already dealt with
5842     // by this point), we can increase the critical path length if the loop
5843     // we're interleaving is inside another loop. For tree-wise reductions
5844     // set the limit to 2, and for ordered reductions it's best to disable
5845     // interleaving entirely.
5846     if (HasReductions && TheLoop->getLoopDepth() > 1) {
5847       bool HasOrderedReductions =
5848           any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
5849             const RecurrenceDescriptor &RdxDesc = Reduction.second;
5850             return RdxDesc.isOrdered();
5851           });
5852       if (HasOrderedReductions) {
5853         LLVM_DEBUG(
5854             dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
5855         return 1;
5856       }
5857 
5858       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5859       SmallIC = std::min(SmallIC, F);
5860       StoresIC = std::min(StoresIC, F);
5861       LoadsIC = std::min(LoadsIC, F);
5862     }
5863 
5864     if (EnableLoadStoreRuntimeInterleave &&
5865         std::max(StoresIC, LoadsIC) > SmallIC) {
5866       LLVM_DEBUG(
5867           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5868       return std::max(StoresIC, LoadsIC);
5869     }
5870 
5871     // If there are scalar reductions and TTI has enabled aggressive
5872     // interleaving for reductions, we will interleave to expose ILP.
5873     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
5874         AggressivelyInterleaveReductions) {
5875       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5876       // Interleave no less than SmallIC but not as aggressive as the normal IC
5877       // to satisfy the rare situation when resources are too limited.
5878       return std::max(IC / 2, SmallIC);
5879     } else {
5880       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5881       return SmallIC;
5882     }
5883   }
5884 
5885   // Interleave if this is a large loop (small loops are already dealt with by
5886   // this point) that could benefit from interleaving.
5887   if (AggressivelyInterleaveReductions) {
5888     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5889     return IC;
5890   }
5891 
5892   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
5893   return 1;
5894 }
5895 
5896 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5897 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
5898   // This function calculates the register usage by measuring the highest number
5899   // of values that are alive at a single location. Obviously, this is a very
5900   // rough estimation. We scan the loop in a topological order in order and
5901   // assign a number to each instruction. We use RPO to ensure that defs are
5902   // met before their users. We assume that each instruction that has in-loop
5903   // users starts an interval. We record every time that an in-loop value is
5904   // used, so we have a list of the first and last occurrences of each
5905   // instruction. Next, we transpose this data structure into a multi map that
5906   // holds the list of intervals that *end* at a specific location. This multi
5907   // map allows us to perform a linear search. We scan the instructions linearly
5908   // and record each time that a new interval starts, by placing it in a set.
5909   // If we find this value in the multi-map then we remove it from the set.
5910   // The max register usage is the maximum size of the set.
5911   // We also search for instructions that are defined outside the loop, but are
5912   // used inside the loop. We need this number separately from the max-interval
5913   // usage number because when we unroll, loop-invariant values do not take
5914   // more register.
5915   LoopBlocksDFS DFS(TheLoop);
5916   DFS.perform(LI);
5917 
5918   RegisterUsage RU;
5919 
5920   // Each 'key' in the map opens a new interval. The values
5921   // of the map are the index of the 'last seen' usage of the
5922   // instruction that is the key.
5923   using IntervalMap = DenseMap<Instruction *, unsigned>;
5924 
5925   // Maps instruction to its index.
5926   SmallVector<Instruction *, 64> IdxToInstr;
5927   // Marks the end of each interval.
5928   IntervalMap EndPoint;
5929   // Saves the list of instruction indices that are used in the loop.
5930   SmallPtrSet<Instruction *, 8> Ends;
5931   // Saves the list of values that are used in the loop but are
5932   // defined outside the loop, such as arguments and constants.
5933   SmallPtrSet<Value *, 8> LoopInvariants;
5934 
5935   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
5936     for (Instruction &I : BB->instructionsWithoutDebug()) {
5937       IdxToInstr.push_back(&I);
5938 
5939       // Save the end location of each USE.
5940       for (Value *U : I.operands()) {
5941         auto *Instr = dyn_cast<Instruction>(U);
5942 
5943         // Ignore non-instruction values such as arguments, constants, etc.
5944         if (!Instr)
5945           continue;
5946 
5947         // If this instruction is outside the loop then record it and continue.
5948         if (!TheLoop->contains(Instr)) {
5949           LoopInvariants.insert(Instr);
5950           continue;
5951         }
5952 
5953         // Overwrite previous end points.
5954         EndPoint[Instr] = IdxToInstr.size();
5955         Ends.insert(Instr);
5956       }
5957     }
5958   }
5959 
5960   // Saves the list of intervals that end with the index in 'key'.
5961   using InstrList = SmallVector<Instruction *, 2>;
5962   DenseMap<unsigned, InstrList> TransposeEnds;
5963 
5964   // Transpose the EndPoints to a list of values that end at each index.
5965   for (auto &Interval : EndPoint)
5966     TransposeEnds[Interval.second].push_back(Interval.first);
5967 
5968   SmallPtrSet<Instruction *, 8> OpenIntervals;
5969   SmallVector<RegisterUsage, 8> RUs(VFs.size());
5970   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
5971 
5972   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
5973 
5974   // A lambda that gets the register usage for the given type and VF.
5975   const auto &TTICapture = TTI;
5976   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
5977     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
5978       return 0;
5979     InstructionCost::CostType RegUsage =
5980         *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
5981     assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() &&
5982            "Nonsensical values for register usage.");
5983     return RegUsage;
5984   };
5985 
5986   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
5987     Instruction *I = IdxToInstr[i];
5988 
5989     // Remove all of the instructions that end at this location.
5990     InstrList &List = TransposeEnds[i];
5991     for (Instruction *ToRemove : List)
5992       OpenIntervals.erase(ToRemove);
5993 
5994     // Ignore instructions that are never used within the loop.
5995     if (!Ends.count(I))
5996       continue;
5997 
5998     // Skip ignored values.
5999     if (ValuesToIgnore.count(I))
6000       continue;
6001 
6002     // For each VF find the maximum usage of registers.
6003     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6004       // Count the number of live intervals.
6005       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6006 
6007       if (VFs[j].isScalar()) {
6008         for (auto Inst : OpenIntervals) {
6009           unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6010           if (RegUsage.find(ClassID) == RegUsage.end())
6011             RegUsage[ClassID] = 1;
6012           else
6013             RegUsage[ClassID] += 1;
6014         }
6015       } else {
6016         collectUniformsAndScalars(VFs[j]);
6017         for (auto Inst : OpenIntervals) {
6018           // Skip ignored values for VF > 1.
6019           if (VecValuesToIgnore.count(Inst))
6020             continue;
6021           if (isScalarAfterVectorization(Inst, VFs[j])) {
6022             unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6023             if (RegUsage.find(ClassID) == RegUsage.end())
6024               RegUsage[ClassID] = 1;
6025             else
6026               RegUsage[ClassID] += 1;
6027           } else {
6028             unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
6029             if (RegUsage.find(ClassID) == RegUsage.end())
6030               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
6031             else
6032               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
6033           }
6034         }
6035       }
6036 
6037       for (auto& pair : RegUsage) {
6038         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
6039           MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
6040         else
6041           MaxUsages[j][pair.first] = pair.second;
6042       }
6043     }
6044 
6045     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6046                       << OpenIntervals.size() << '\n');
6047 
6048     // Add the current instruction to the list of open intervals.
6049     OpenIntervals.insert(I);
6050   }
6051 
6052   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6053     SmallMapVector<unsigned, unsigned, 4> Invariant;
6054 
6055     for (auto Inst : LoopInvariants) {
6056       unsigned Usage =
6057           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6058       unsigned ClassID =
6059           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
6060       if (Invariant.find(ClassID) == Invariant.end())
6061         Invariant[ClassID] = Usage;
6062       else
6063         Invariant[ClassID] += Usage;
6064     }
6065 
6066     LLVM_DEBUG({
6067       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6068       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6069              << " item\n";
6070       for (const auto &pair : MaxUsages[i]) {
6071         dbgs() << "LV(REG): RegisterClass: "
6072                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6073                << " registers\n";
6074       }
6075       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6076              << " item\n";
6077       for (const auto &pair : Invariant) {
6078         dbgs() << "LV(REG): RegisterClass: "
6079                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6080                << " registers\n";
6081       }
6082     });
6083 
6084     RU.LoopInvariantRegs = Invariant;
6085     RU.MaxLocalUsers = MaxUsages[i];
6086     RUs[i] = RU;
6087   }
6088 
6089   return RUs;
6090 }
6091 
6092 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
6093                                                            ElementCount VF) {
6094   // TODO: Cost model for emulated masked load/store is completely
6095   // broken. This hack guides the cost model to use an artificially
6096   // high enough value to practically disable vectorization with such
6097   // operations, except where previously deployed legality hack allowed
6098   // using very low cost values. This is to avoid regressions coming simply
6099   // from moving "masked load/store" check from legality to cost model.
6100   // Masked Load/Gather emulation was previously never allowed.
6101   // Limited number of Masked Store/Scatter emulation was allowed.
6102   assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction");
6103   return isa<LoadInst>(I) ||
6104          (isa<StoreInst>(I) &&
6105           NumPredStores > NumberOfStoresToPredicate);
6106 }
6107 
6108 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6109   // If we aren't vectorizing the loop, or if we've already collected the
6110   // instructions to scalarize, there's nothing to do. Collection may already
6111   // have occurred if we have a user-selected VF and are now computing the
6112   // expected cost for interleaving.
6113   if (VF.isScalar() || VF.isZero() ||
6114       InstsToScalarize.find(VF) != InstsToScalarize.end())
6115     return;
6116 
6117   // Initialize a mapping for VF in InstsToScalalarize. If we find that it's
6118   // not profitable to scalarize any instructions, the presence of VF in the
6119   // map will indicate that we've analyzed it already.
6120   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6121 
6122   // Find all the instructions that are scalar with predication in the loop and
6123   // determine if it would be better to not if-convert the blocks they are in.
6124   // If so, we also record the instructions to scalarize.
6125   for (BasicBlock *BB : TheLoop->blocks()) {
6126     if (!blockNeedsPredicationForAnyReason(BB))
6127       continue;
6128     for (Instruction &I : *BB)
6129       if (isScalarWithPredication(&I, VF)) {
6130         ScalarCostsTy ScalarCosts;
6131         // Do not apply discount if scalable, because that would lead to
6132         // invalid scalarization costs.
6133         // Do not apply discount logic if hacked cost is needed
6134         // for emulated masked memrefs.
6135         if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) &&
6136             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6137           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6138         // Remember that BB will remain after vectorization.
6139         PredicatedBBsAfterVectorization.insert(BB);
6140       }
6141   }
6142 }
6143 
6144 int LoopVectorizationCostModel::computePredInstDiscount(
6145     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6146   assert(!isUniformAfterVectorization(PredInst, VF) &&
6147          "Instruction marked uniform-after-vectorization will be predicated");
6148 
6149   // Initialize the discount to zero, meaning that the scalar version and the
6150   // vector version cost the same.
6151   InstructionCost Discount = 0;
6152 
6153   // Holds instructions to analyze. The instructions we visit are mapped in
6154   // ScalarCosts. Those instructions are the ones that would be scalarized if
6155   // we find that the scalar version costs less.
6156   SmallVector<Instruction *, 8> Worklist;
6157 
6158   // Returns true if the given instruction can be scalarized.
6159   auto canBeScalarized = [&](Instruction *I) -> bool {
6160     // We only attempt to scalarize instructions forming a single-use chain
6161     // from the original predicated block that would otherwise be vectorized.
6162     // Although not strictly necessary, we give up on instructions we know will
6163     // already be scalar to avoid traversing chains that are unlikely to be
6164     // beneficial.
6165     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6166         isScalarAfterVectorization(I, VF))
6167       return false;
6168 
6169     // If the instruction is scalar with predication, it will be analyzed
6170     // separately. We ignore it within the context of PredInst.
6171     if (isScalarWithPredication(I, VF))
6172       return false;
6173 
6174     // If any of the instruction's operands are uniform after vectorization,
6175     // the instruction cannot be scalarized. This prevents, for example, a
6176     // masked load from being scalarized.
6177     //
6178     // We assume we will only emit a value for lane zero of an instruction
6179     // marked uniform after vectorization, rather than VF identical values.
6180     // Thus, if we scalarize an instruction that uses a uniform, we would
6181     // create uses of values corresponding to the lanes we aren't emitting code
6182     // for. This behavior can be changed by allowing getScalarValue to clone
6183     // the lane zero values for uniforms rather than asserting.
6184     for (Use &U : I->operands())
6185       if (auto *J = dyn_cast<Instruction>(U.get()))
6186         if (isUniformAfterVectorization(J, VF))
6187           return false;
6188 
6189     // Otherwise, we can scalarize the instruction.
6190     return true;
6191   };
6192 
6193   // Compute the expected cost discount from scalarizing the entire expression
6194   // feeding the predicated instruction. We currently only consider expressions
6195   // that are single-use instruction chains.
6196   Worklist.push_back(PredInst);
6197   while (!Worklist.empty()) {
6198     Instruction *I = Worklist.pop_back_val();
6199 
6200     // If we've already analyzed the instruction, there's nothing to do.
6201     if (ScalarCosts.find(I) != ScalarCosts.end())
6202       continue;
6203 
6204     // Compute the cost of the vector instruction. Note that this cost already
6205     // includes the scalarization overhead of the predicated instruction.
6206     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6207 
6208     // Compute the cost of the scalarized instruction. This cost is the cost of
6209     // the instruction as if it wasn't if-converted and instead remained in the
6210     // predicated block. We will scale this cost by block probability after
6211     // computing the scalarization overhead.
6212     InstructionCost ScalarCost =
6213         VF.getFixedValue() *
6214         getInstructionCost(I, ElementCount::getFixed(1)).first;
6215 
6216     // Compute the scalarization overhead of needed insertelement instructions
6217     // and phi nodes.
6218     if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
6219       ScalarCost += TTI.getScalarizationOverhead(
6220           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6221           APInt::getAllOnes(VF.getFixedValue()), true, false);
6222       ScalarCost +=
6223           VF.getFixedValue() *
6224           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6225     }
6226 
6227     // Compute the scalarization overhead of needed extractelement
6228     // instructions. For each of the instruction's operands, if the operand can
6229     // be scalarized, add it to the worklist; otherwise, account for the
6230     // overhead.
6231     for (Use &U : I->operands())
6232       if (auto *J = dyn_cast<Instruction>(U.get())) {
6233         assert(VectorType::isValidElementType(J->getType()) &&
6234                "Instruction has non-scalar type");
6235         if (canBeScalarized(J))
6236           Worklist.push_back(J);
6237         else if (needsExtract(J, VF)) {
6238           ScalarCost += TTI.getScalarizationOverhead(
6239               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6240               APInt::getAllOnes(VF.getFixedValue()), false, true);
6241         }
6242       }
6243 
6244     // Scale the total scalar cost by block probability.
6245     ScalarCost /= getReciprocalPredBlockProb();
6246 
6247     // Compute the discount. A non-negative discount means the vector version
6248     // of the instruction costs more, and scalarizing would be beneficial.
6249     Discount += VectorCost - ScalarCost;
6250     ScalarCosts[I] = ScalarCost;
6251   }
6252 
6253   return *Discount.getValue();
6254 }
6255 
6256 LoopVectorizationCostModel::VectorizationCostTy
6257 LoopVectorizationCostModel::expectedCost(
6258     ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
6259   VectorizationCostTy Cost;
6260 
6261   // For each block.
6262   for (BasicBlock *BB : TheLoop->blocks()) {
6263     VectorizationCostTy BlockCost;
6264 
6265     // For each instruction in the old loop.
6266     for (Instruction &I : BB->instructionsWithoutDebug()) {
6267       // Skip ignored values.
6268       if (ValuesToIgnore.count(&I) ||
6269           (VF.isVector() && VecValuesToIgnore.count(&I)))
6270         continue;
6271 
6272       VectorizationCostTy C = getInstructionCost(&I, VF);
6273 
6274       // Check if we should override the cost.
6275       if (C.first.isValid() &&
6276           ForceTargetInstructionCost.getNumOccurrences() > 0)
6277         C.first = InstructionCost(ForceTargetInstructionCost);
6278 
6279       // Keep a list of instructions with invalid costs.
6280       if (Invalid && !C.first.isValid())
6281         Invalid->emplace_back(&I, VF);
6282 
6283       BlockCost.first += C.first;
6284       BlockCost.second |= C.second;
6285       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6286                         << " for VF " << VF << " For instruction: " << I
6287                         << '\n');
6288     }
6289 
6290     // If we are vectorizing a predicated block, it will have been
6291     // if-converted. This means that the block's instructions (aside from
6292     // stores and instructions that may divide by zero) will now be
6293     // unconditionally executed. For the scalar case, we may not always execute
6294     // the predicated block, if it is an if-else block. Thus, scale the block's
6295     // cost by the probability of executing it. blockNeedsPredication from
6296     // Legal is used so as to not include all blocks in tail folded loops.
6297     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6298       BlockCost.first /= getReciprocalPredBlockProb();
6299 
6300     Cost.first += BlockCost.first;
6301     Cost.second |= BlockCost.second;
6302   }
6303 
6304   return Cost;
6305 }
6306 
6307 /// Gets Address Access SCEV after verifying that the access pattern
6308 /// is loop invariant except the induction variable dependence.
6309 ///
6310 /// This SCEV can be sent to the Target in order to estimate the address
6311 /// calculation cost.
6312 static const SCEV *getAddressAccessSCEV(
6313               Value *Ptr,
6314               LoopVectorizationLegality *Legal,
6315               PredicatedScalarEvolution &PSE,
6316               const Loop *TheLoop) {
6317 
6318   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6319   if (!Gep)
6320     return nullptr;
6321 
6322   // We are looking for a gep with all loop invariant indices except for one
6323   // which should be an induction variable.
6324   auto SE = PSE.getSE();
6325   unsigned NumOperands = Gep->getNumOperands();
6326   for (unsigned i = 1; i < NumOperands; ++i) {
6327     Value *Opd = Gep->getOperand(i);
6328     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6329         !Legal->isInductionVariable(Opd))
6330       return nullptr;
6331   }
6332 
6333   // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV.
6334   return PSE.getSCEV(Ptr);
6335 }
6336 
6337 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6338   return Legal->hasStride(I->getOperand(0)) ||
6339          Legal->hasStride(I->getOperand(1));
6340 }
6341 
6342 InstructionCost
6343 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6344                                                         ElementCount VF) {
6345   assert(VF.isVector() &&
6346          "Scalarization cost of instruction implies vectorization.");
6347   if (VF.isScalable())
6348     return InstructionCost::getInvalid();
6349 
6350   Type *ValTy = getLoadStoreType(I);
6351   auto SE = PSE.getSE();
6352 
6353   unsigned AS = getLoadStoreAddressSpace(I);
6354   Value *Ptr = getLoadStorePointerOperand(I);
6355   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6356   // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
6357   //       that it is being called from this specific place.
6358 
6359   // Figure out whether the access is strided and get the stride value
6360   // if it's known in compile time
6361   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6362 
6363   // Get the cost of the scalar memory instruction and address computation.
6364   InstructionCost Cost =
6365       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6366 
6367   // Don't pass *I here, since it is scalar but will actually be part of a
6368   // vectorized loop where the user of it is a vectorized instruction.
6369   const Align Alignment = getLoadStoreAlignment(I);
6370   Cost += VF.getKnownMinValue() *
6371           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6372                               AS, TTI::TCK_RecipThroughput);
6373 
6374   // Get the overhead of the extractelement and insertelement instructions
6375   // we might create due to scalarization.
6376   Cost += getScalarizationOverhead(I, VF);
6377 
6378   // If we have a predicated load/store, it will need extra i1 extracts and
6379   // conditional branches, but may not be executed for each vector lane. Scale
6380   // the cost by the probability of executing the predicated block.
6381   if (isPredicatedInst(I, VF)) {
6382     Cost /= getReciprocalPredBlockProb();
6383 
6384     // Add the cost of an i1 extract and a branch
6385     auto *Vec_i1Ty =
6386         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
6387     Cost += TTI.getScalarizationOverhead(
6388         Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
6389         /*Insert=*/false, /*Extract=*/true);
6390     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
6391 
6392     if (useEmulatedMaskMemRefHack(I, VF))
6393       // Artificially setting to a high enough value to practically disable
6394       // vectorization with such operations.
6395       Cost = 3000000;
6396   }
6397 
6398   return Cost;
6399 }
6400 
6401 InstructionCost
6402 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6403                                                     ElementCount VF) {
6404   Type *ValTy = getLoadStoreType(I);
6405   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6406   Value *Ptr = getLoadStorePointerOperand(I);
6407   unsigned AS = getLoadStoreAddressSpace(I);
6408   int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
6409   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6410 
6411   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6412          "Stride should be 1 or -1 for consecutive memory access");
6413   const Align Alignment = getLoadStoreAlignment(I);
6414   InstructionCost Cost = 0;
6415   if (Legal->isMaskRequired(I))
6416     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6417                                       CostKind);
6418   else
6419     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6420                                 CostKind, I);
6421 
6422   bool Reverse = ConsecutiveStride < 0;
6423   if (Reverse)
6424     Cost +=
6425         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6426   return Cost;
6427 }
6428 
6429 InstructionCost
6430 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6431                                                 ElementCount VF) {
6432   assert(Legal->isUniformMemOp(*I));
6433 
6434   Type *ValTy = getLoadStoreType(I);
6435   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6436   const Align Alignment = getLoadStoreAlignment(I);
6437   unsigned AS = getLoadStoreAddressSpace(I);
6438   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6439   if (isa<LoadInst>(I)) {
6440     return TTI.getAddressComputationCost(ValTy) +
6441            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6442                                CostKind) +
6443            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6444   }
6445   StoreInst *SI = cast<StoreInst>(I);
6446 
6447   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
6448   return TTI.getAddressComputationCost(ValTy) +
6449          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6450                              CostKind) +
6451          (isLoopInvariantStoreValue
6452               ? 0
6453               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6454                                        VF.getKnownMinValue() - 1));
6455 }
6456 
6457 InstructionCost
6458 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6459                                                  ElementCount VF) {
6460   Type *ValTy = getLoadStoreType(I);
6461   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6462   const Align Alignment = getLoadStoreAlignment(I);
6463   const Value *Ptr = getLoadStorePointerOperand(I);
6464 
6465   return TTI.getAddressComputationCost(VectorTy) +
6466          TTI.getGatherScatterOpCost(
6467              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6468              TargetTransformInfo::TCK_RecipThroughput, I);
6469 }
6470 
6471 InstructionCost
6472 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6473                                                    ElementCount VF) {
6474   // TODO: Once we have support for interleaving with scalable vectors
6475   // we can calculate the cost properly here.
6476   if (VF.isScalable())
6477     return InstructionCost::getInvalid();
6478 
6479   Type *ValTy = getLoadStoreType(I);
6480   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6481   unsigned AS = getLoadStoreAddressSpace(I);
6482 
6483   auto Group = getInterleavedAccessGroup(I);
6484   assert(Group && "Fail to get an interleaved access group.");
6485 
6486   unsigned InterleaveFactor = Group->getFactor();
6487   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6488 
6489   // Holds the indices of existing members in the interleaved group.
6490   SmallVector<unsigned, 4> Indices;
6491   for (unsigned IF = 0; IF < InterleaveFactor; IF++)
6492     if (Group->getMember(IF))
6493       Indices.push_back(IF);
6494 
6495   // Calculate the cost of the whole interleaved group.
6496   bool UseMaskForGaps =
6497       (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
6498       (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
6499   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6500       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6501       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6502 
6503   if (Group->isReverse()) {
6504     // TODO: Add support for reversed masked interleaved access.
6505     assert(!Legal->isMaskRequired(I) &&
6506            "Reverse masked interleaved access not supported.");
6507     Cost +=
6508         Group->getNumMembers() *
6509         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6510   }
6511   return Cost;
6512 }
6513 
6514 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
6515     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6516   using namespace llvm::PatternMatch;
6517   // Early exit for no inloop reductions
6518   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6519     return None;
6520   auto *VectorTy = cast<VectorType>(Ty);
6521 
6522   // We are looking for a pattern of, and finding the minimal acceptable cost:
6523   //  reduce(mul(ext(A), ext(B))) or
6524   //  reduce(mul(A, B)) or
6525   //  reduce(ext(A)) or
6526   //  reduce(A).
6527   // The basic idea is that we walk down the tree to do that, finding the root
6528   // reduction instruction in InLoopReductionImmediateChains. From there we find
6529   // the pattern of mul/ext and test the cost of the entire pattern vs the cost
6530   // of the components. If the reduction cost is lower then we return it for the
6531   // reduction instruction and 0 for the other instructions in the pattern. If
6532   // it is not we return an invalid cost specifying the orignal cost method
6533   // should be used.
6534   Instruction *RetI = I;
6535   if (match(RetI, m_ZExtOrSExt(m_Value()))) {
6536     if (!RetI->hasOneUser())
6537       return None;
6538     RetI = RetI->user_back();
6539   }
6540   if (match(RetI, m_Mul(m_Value(), m_Value())) &&
6541       RetI->user_back()->getOpcode() == Instruction::Add) {
6542     if (!RetI->hasOneUser())
6543       return None;
6544     RetI = RetI->user_back();
6545   }
6546 
6547   // Test if the found instruction is a reduction, and if not return an invalid
6548   // cost specifying the parent to use the original cost modelling.
6549   if (!InLoopReductionImmediateChains.count(RetI))
6550     return None;
6551 
6552   // Find the reduction this chain is a part of and calculate the basic cost of
6553   // the reduction on its own.
6554   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6555   Instruction *ReductionPhi = LastChain;
6556   while (!isa<PHINode>(ReductionPhi))
6557     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6558 
6559   const RecurrenceDescriptor &RdxDesc =
6560       Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;
6561 
6562   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
6563       RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
6564 
6565   // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
6566   // normal fmul instruction to the cost of the fadd reduction.
6567   if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
6568     BaseCost +=
6569         TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
6570 
6571   // If we're using ordered reductions then we can just return the base cost
6572   // here, since getArithmeticReductionCost calculates the full ordered
6573   // reduction cost when FP reassociation is not allowed.
6574   if (useOrderedReductions(RdxDesc))
6575     return BaseCost;
6576 
6577   // Get the operand that was not the reduction chain and match it to one of the
6578   // patterns, returning the better cost if it is found.
6579   Instruction *RedOp = RetI->getOperand(1) == LastChain
6580                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6581                            : dyn_cast<Instruction>(RetI->getOperand(1));
6582 
6583   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6584 
6585   Instruction *Op0, *Op1;
6586   if (RedOp &&
6587       match(RedOp,
6588             m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
6589       match(Op0, m_ZExtOrSExt(m_Value())) &&
6590       Op0->getOpcode() == Op1->getOpcode() &&
6591       Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6592       !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
6593       (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
6594 
6595     // Matched reduce(ext(mul(ext(A), ext(B)))
6596     // Note that the extend opcodes need to all match, or if A==B they will have
6597     // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
6598     // which is equally fine.
6599     bool IsUnsigned = isa<ZExtInst>(Op0);
6600     auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
6601     auto *MulType = VectorType::get(Op0->getType(), VectorTy);
6602 
6603     InstructionCost ExtCost =
6604         TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
6605                              TTI::CastContextHint::None, CostKind, Op0);
6606     InstructionCost MulCost =
6607         TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
6608     InstructionCost Ext2Cost =
6609         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
6610                              TTI::CastContextHint::None, CostKind, RedOp);
6611 
6612     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6613         /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6614         CostKind);
6615 
6616     if (RedCost.isValid() &&
6617         RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
6618       return I == RetI ? RedCost : 0;
6619   } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
6620              !TheLoop->isLoopInvariant(RedOp)) {
6621     // Matched reduce(ext(A))
6622     bool IsUnsigned = isa<ZExtInst>(RedOp);
6623     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
6624     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6625         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6626         CostKind);
6627 
6628     InstructionCost ExtCost =
6629         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
6630                              TTI::CastContextHint::None, CostKind, RedOp);
6631     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
6632       return I == RetI ? RedCost : 0;
6633   } else if (RedOp &&
6634              match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
6635     if (match(Op0, m_ZExtOrSExt(m_Value())) &&
6636         Op0->getOpcode() == Op1->getOpcode() &&
6637         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
6638       bool IsUnsigned = isa<ZExtInst>(Op0);
6639       Type *Op0Ty = Op0->getOperand(0)->getType();
6640       Type *Op1Ty = Op1->getOperand(0)->getType();
6641       Type *LargestOpTy =
6642           Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
6643                                                                     : Op0Ty;
6644       auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
6645 
6646       // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of
6647       // different sizes. We take the largest type as the ext to reduce, and add
6648       // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))).
6649       InstructionCost ExtCost0 = TTI.getCastInstrCost(
6650           Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
6651           TTI::CastContextHint::None, CostKind, Op0);
6652       InstructionCost ExtCost1 = TTI.getCastInstrCost(
6653           Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
6654           TTI::CastContextHint::None, CostKind, Op1);
6655       InstructionCost MulCost =
6656           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6657 
6658       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6659           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6660           CostKind);
6661       InstructionCost ExtraExtCost = 0;
6662       if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
6663         Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
6664         ExtraExtCost = TTI.getCastInstrCost(
6665             ExtraExtOp->getOpcode(), ExtType,
6666             VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
6667             TTI::CastContextHint::None, CostKind, ExtraExtOp);
6668       }
6669 
6670       if (RedCost.isValid() &&
6671           (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
6672         return I == RetI ? RedCost : 0;
6673     } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
6674       // Matched reduce(mul())
6675       InstructionCost MulCost =
6676           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6677 
6678       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6679           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
6680           CostKind);
6681 
6682       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
6683         return I == RetI ? RedCost : 0;
6684     }
6685   }
6686 
6687   return I == RetI ? Optional<InstructionCost>(BaseCost) : None;
6688 }
6689 
6690 InstructionCost
6691 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
6692                                                      ElementCount VF) {
6693   // Calculate scalar cost only. Vectorization cost should be ready at this
6694   // moment.
6695   if (VF.isScalar()) {
6696     Type *ValTy = getLoadStoreType(I);
6697     const Align Alignment = getLoadStoreAlignment(I);
6698     unsigned AS = getLoadStoreAddressSpace(I);
6699 
6700     return TTI.getAddressComputationCost(ValTy) +
6701            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
6702                                TTI::TCK_RecipThroughput, I);
6703   }
6704   return getWideningCost(I, VF);
6705 }
6706 
6707 LoopVectorizationCostModel::VectorizationCostTy
6708 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6709                                                ElementCount VF) {
6710   // If we know that this instruction will remain uniform, check the cost of
6711   // the scalar version.
6712   if (isUniformAfterVectorization(I, VF))
6713     VF = ElementCount::getFixed(1);
6714 
6715   if (VF.isVector() && isProfitableToScalarize(I, VF))
6716     return VectorizationCostTy(InstsToScalarize[VF][I], false);
6717 
6718   // Forced scalars do not have any scalarization overhead.
6719   auto ForcedScalar = ForcedScalars.find(VF);
6720   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
6721     auto InstSet = ForcedScalar->second;
6722     if (InstSet.count(I))
6723       return VectorizationCostTy(
6724           (getInstructionCost(I, ElementCount::getFixed(1)).first *
6725            VF.getKnownMinValue()),
6726           false);
6727   }
6728 
6729   Type *VectorTy;
6730   InstructionCost C = getInstructionCost(I, VF, VectorTy);
6731 
6732   bool TypeNotScalarized = false;
6733   if (VF.isVector() && VectorTy->isVectorTy()) {
6734     unsigned NumParts = TTI.getNumberOfParts(VectorTy);
6735     if (NumParts)
6736       TypeNotScalarized = NumParts < VF.getKnownMinValue();
6737     else
6738       C = InstructionCost::getInvalid();
6739   }
6740   return VectorizationCostTy(C, TypeNotScalarized);
6741 }
6742 
6743 InstructionCost
6744 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
6745                                                      ElementCount VF) const {
6746 
6747   // There is no mechanism yet to create a scalable scalarization loop,
6748   // so this is currently Invalid.
6749   if (VF.isScalable())
6750     return InstructionCost::getInvalid();
6751 
6752   if (VF.isScalar())
6753     return 0;
6754 
6755   InstructionCost Cost = 0;
6756   Type *RetTy = ToVectorTy(I->getType(), VF);
6757   if (!RetTy->isVoidTy() &&
6758       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
6759     Cost += TTI.getScalarizationOverhead(
6760         cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true,
6761         false);
6762 
6763   // Some targets keep addresses scalar.
6764   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
6765     return Cost;
6766 
6767   // Some targets support efficient element stores.
6768   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
6769     return Cost;
6770 
6771   // Collect operands to consider.
6772   CallInst *CI = dyn_cast<CallInst>(I);
6773   Instruction::op_range Ops = CI ? CI->args() : I->operands();
6774 
6775   // Skip operands that do not require extraction/scalarization and do not incur
6776   // any overhead.
6777   SmallVector<Type *> Tys;
6778   for (auto *V : filterExtractingOperands(Ops, VF))
6779     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
6780   return Cost + TTI.getOperandsScalarizationOverhead(
6781                     filterExtractingOperands(Ops, VF), Tys);
6782 }
6783 
6784 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
6785   if (VF.isScalar())
6786     return;
6787   NumPredStores = 0;
6788   for (BasicBlock *BB : TheLoop->blocks()) {
6789     // For each instruction in the old loop.
6790     for (Instruction &I : *BB) {
6791       Value *Ptr =  getLoadStorePointerOperand(&I);
6792       if (!Ptr)
6793         continue;
6794 
6795       // TODO: We should generate better code and update the cost model for
6796       // predicated uniform stores. Today they are treated as any other
6797       // predicated store (see added test cases in
6798       // invariant-store-vectorization.ll).
6799       if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
6800         NumPredStores++;
6801 
6802       if (Legal->isUniformMemOp(I)) {
6803         // TODO: Avoid replicating loads and stores instead of
6804         // relying on instcombine to remove them.
6805         // Load: Scalar load + broadcast
6806         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
6807         InstructionCost Cost;
6808         if (isa<StoreInst>(&I) && VF.isScalable() &&
6809             isLegalGatherOrScatter(&I, VF)) {
6810           Cost = getGatherScatterCost(&I, VF);
6811           setWideningDecision(&I, VF, CM_GatherScatter, Cost);
6812         } else {
6813           assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
6814                  "Cannot yet scalarize uniform stores");
6815           Cost = getUniformMemOpCost(&I, VF);
6816           setWideningDecision(&I, VF, CM_Scalarize, Cost);
6817         }
6818         continue;
6819       }
6820 
6821       // We assume that widening is the best solution when possible.
6822       if (memoryInstructionCanBeWidened(&I, VF)) {
6823         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
6824         int ConsecutiveStride = Legal->isConsecutivePtr(
6825             getLoadStoreType(&I), getLoadStorePointerOperand(&I));
6826         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6827                "Expected consecutive stride.");
6828         InstWidening Decision =
6829             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
6830         setWideningDecision(&I, VF, Decision, Cost);
6831         continue;
6832       }
6833 
6834       // Choose between Interleaving, Gather/Scatter or Scalarization.
6835       InstructionCost InterleaveCost = InstructionCost::getInvalid();
6836       unsigned NumAccesses = 1;
6837       if (isAccessInterleaved(&I)) {
6838         auto Group = getInterleavedAccessGroup(&I);
6839         assert(Group && "Fail to get an interleaved access group.");
6840 
6841         // Make one decision for the whole group.
6842         if (getWideningDecision(&I, VF) != CM_Unknown)
6843           continue;
6844 
6845         NumAccesses = Group->getNumMembers();
6846         if (interleavedAccessCanBeWidened(&I, VF))
6847           InterleaveCost = getInterleaveGroupCost(&I, VF);
6848       }
6849 
6850       InstructionCost GatherScatterCost =
6851           isLegalGatherOrScatter(&I, VF)
6852               ? getGatherScatterCost(&I, VF) * NumAccesses
6853               : InstructionCost::getInvalid();
6854 
6855       InstructionCost ScalarizationCost =
6856           getMemInstScalarizationCost(&I, VF) * NumAccesses;
6857 
6858       // Choose better solution for the current VF,
6859       // write down this decision and use it during vectorization.
6860       InstructionCost Cost;
6861       InstWidening Decision;
6862       if (InterleaveCost <= GatherScatterCost &&
6863           InterleaveCost < ScalarizationCost) {
6864         Decision = CM_Interleave;
6865         Cost = InterleaveCost;
6866       } else if (GatherScatterCost < ScalarizationCost) {
6867         Decision = CM_GatherScatter;
6868         Cost = GatherScatterCost;
6869       } else {
6870         Decision = CM_Scalarize;
6871         Cost = ScalarizationCost;
6872       }
6873       // If the instructions belongs to an interleave group, the whole group
6874       // receives the same decision. The whole group receives the cost, but
6875       // the cost will actually be assigned to one instruction.
6876       if (auto Group = getInterleavedAccessGroup(&I))
6877         setWideningDecision(Group, VF, Decision, Cost);
6878       else
6879         setWideningDecision(&I, VF, Decision, Cost);
6880     }
6881   }
6882 
6883   // Make sure that any load of address and any other address computation
6884   // remains scalar unless there is gather/scatter support. This avoids
6885   // inevitable extracts into address registers, and also has the benefit of
6886   // activating LSR more, since that pass can't optimize vectorized
6887   // addresses.
6888   if (TTI.prefersVectorizedAddressing())
6889     return;
6890 
6891   // Start with all scalar pointer uses.
6892   SmallPtrSet<Instruction *, 8> AddrDefs;
6893   for (BasicBlock *BB : TheLoop->blocks())
6894     for (Instruction &I : *BB) {
6895       Instruction *PtrDef =
6896         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
6897       if (PtrDef && TheLoop->contains(PtrDef) &&
6898           getWideningDecision(&I, VF) != CM_GatherScatter)
6899         AddrDefs.insert(PtrDef);
6900     }
6901 
6902   // Add all instructions used to generate the addresses.
6903   SmallVector<Instruction *, 4> Worklist;
6904   append_range(Worklist, AddrDefs);
6905   while (!Worklist.empty()) {
6906     Instruction *I = Worklist.pop_back_val();
6907     for (auto &Op : I->operands())
6908       if (auto *InstOp = dyn_cast<Instruction>(Op))
6909         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
6910             AddrDefs.insert(InstOp).second)
6911           Worklist.push_back(InstOp);
6912   }
6913 
6914   for (auto *I : AddrDefs) {
6915     if (isa<LoadInst>(I)) {
6916       // Setting the desired widening decision should ideally be handled in
6917       // by cost functions, but since this involves the task of finding out
6918       // if the loaded register is involved in an address computation, it is
6919       // instead changed here when we know this is the case.
6920       InstWidening Decision = getWideningDecision(I, VF);
6921       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
6922         // Scalarize a widened load of address.
6923         setWideningDecision(
6924             I, VF, CM_Scalarize,
6925             (VF.getKnownMinValue() *
6926              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
6927       else if (auto Group = getInterleavedAccessGroup(I)) {
6928         // Scalarize an interleave group of address loads.
6929         for (unsigned I = 0; I < Group->getFactor(); ++I) {
6930           if (Instruction *Member = Group->getMember(I))
6931             setWideningDecision(
6932                 Member, VF, CM_Scalarize,
6933                 (VF.getKnownMinValue() *
6934                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
6935         }
6936       }
6937     } else
6938       // Make sure I gets scalarized and a cost estimate without
6939       // scalarization overhead.
6940       ForcedScalars[VF].insert(I);
6941   }
6942 }
6943 
6944 InstructionCost
6945 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
6946                                                Type *&VectorTy) {
6947   Type *RetTy = I->getType();
6948   if (canTruncateToMinimalBitwidth(I, VF))
6949     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6950   auto SE = PSE.getSE();
6951   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6952 
6953   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
6954                                                 ElementCount VF) -> bool {
6955     if (VF.isScalar())
6956       return true;
6957 
6958     auto Scalarized = InstsToScalarize.find(VF);
6959     assert(Scalarized != InstsToScalarize.end() &&
6960            "VF not yet analyzed for scalarization profitability");
6961     return !Scalarized->second.count(I) &&
6962            llvm::all_of(I->users(), [&](User *U) {
6963              auto *UI = cast<Instruction>(U);
6964              return !Scalarized->second.count(UI);
6965            });
6966   };
6967   (void) hasSingleCopyAfterVectorization;
6968 
6969   if (isScalarAfterVectorization(I, VF)) {
6970     // With the exception of GEPs and PHIs, after scalarization there should
6971     // only be one copy of the instruction generated in the loop. This is
6972     // because the VF is either 1, or any instructions that need scalarizing
6973     // have already been dealt with by the the time we get here. As a result,
6974     // it means we don't have to multiply the instruction cost by VF.
6975     assert(I->getOpcode() == Instruction::GetElementPtr ||
6976            I->getOpcode() == Instruction::PHI ||
6977            (I->getOpcode() == Instruction::BitCast &&
6978             I->getType()->isPointerTy()) ||
6979            hasSingleCopyAfterVectorization(I, VF));
6980     VectorTy = RetTy;
6981   } else
6982     VectorTy = ToVectorTy(RetTy, VF);
6983 
6984   // TODO: We need to estimate the cost of intrinsic calls.
6985   switch (I->getOpcode()) {
6986   case Instruction::GetElementPtr:
6987     // We mark this instruction as zero-cost because the cost of GEPs in
6988     // vectorized code depends on whether the corresponding memory instruction
6989     // is scalarized or not. Therefore, we handle GEPs with the memory
6990     // instruction cost.
6991     return 0;
6992   case Instruction::Br: {
6993     // In cases of scalarized and predicated instructions, there will be VF
6994     // predicated blocks in the vectorized loop. Each branch around these
6995     // blocks requires also an extract of its vector compare i1 element.
6996     bool ScalarPredicatedBB = false;
6997     BranchInst *BI = cast<BranchInst>(I);
6998     if (VF.isVector() && BI->isConditional() &&
6999         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7000          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7001       ScalarPredicatedBB = true;
7002 
7003     if (ScalarPredicatedBB) {
7004       // Not possible to scalarize scalable vector with predicated instructions.
7005       if (VF.isScalable())
7006         return InstructionCost::getInvalid();
7007       // Return cost for branches around scalarized and predicated blocks.
7008       auto *Vec_i1Ty =
7009           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7010       return (
7011           TTI.getScalarizationOverhead(
7012               Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
7013           (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
7014     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7015       // The back-edge branch will remain, as will all scalar branches.
7016       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7017     else
7018       // This branch will be eliminated by if-conversion.
7019       return 0;
7020     // Note: We currently assume zero cost for an unconditional branch inside
7021     // a predicated block since it will become a fall-through, although we
7022     // may decide in the future to call TTI for all branches.
7023   }
7024   case Instruction::PHI: {
7025     auto *Phi = cast<PHINode>(I);
7026 
7027     // First-order recurrences are replaced by vector shuffles inside the loop.
7028     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7029     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7030       return TTI.getShuffleCost(
7031           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7032           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7033 
7034     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7035     // converted into select instructions. We require N - 1 selects per phi
7036     // node, where N is the number of incoming values.
7037     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7038       return (Phi->getNumIncomingValues() - 1) *
7039              TTI.getCmpSelInstrCost(
7040                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7041                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7042                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7043 
7044     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7045   }
7046   case Instruction::UDiv:
7047   case Instruction::SDiv:
7048   case Instruction::URem:
7049   case Instruction::SRem:
7050     // If we have a predicated instruction, it may not be executed for each
7051     // vector lane. Get the scalarization cost and scale this amount by the
7052     // probability of executing the predicated block. If the instruction is not
7053     // predicated, we fall through to the next case.
7054     if (VF.isVector() && isScalarWithPredication(I, VF)) {
7055       InstructionCost Cost = 0;
7056 
7057       // These instructions have a non-void type, so account for the phi nodes
7058       // that we will create. This cost is likely to be zero. The phi node
7059       // cost, if any, should be scaled by the block probability because it
7060       // models a copy at the end of each predicated block.
7061       Cost += VF.getKnownMinValue() *
7062               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7063 
7064       // The cost of the non-predicated instruction.
7065       Cost += VF.getKnownMinValue() *
7066               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7067 
7068       // The cost of insertelement and extractelement instructions needed for
7069       // scalarization.
7070       Cost += getScalarizationOverhead(I, VF);
7071 
7072       // Scale the cost by the probability of executing the predicated blocks.
7073       // This assumes the predicated block for each vector lane is equally
7074       // likely.
7075       return Cost / getReciprocalPredBlockProb();
7076     }
7077     LLVM_FALLTHROUGH;
7078   case Instruction::Add:
7079   case Instruction::FAdd:
7080   case Instruction::Sub:
7081   case Instruction::FSub:
7082   case Instruction::Mul:
7083   case Instruction::FMul:
7084   case Instruction::FDiv:
7085   case Instruction::FRem:
7086   case Instruction::Shl:
7087   case Instruction::LShr:
7088   case Instruction::AShr:
7089   case Instruction::And:
7090   case Instruction::Or:
7091   case Instruction::Xor: {
7092     // Since we will replace the stride by 1 the multiplication should go away.
7093     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7094       return 0;
7095 
7096     // Detect reduction patterns
7097     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7098       return *RedCost;
7099 
7100     // Certain instructions can be cheaper to vectorize if they have a constant
7101     // second vector operand. One example of this are shifts on x86.
7102     Value *Op2 = I->getOperand(1);
7103     TargetTransformInfo::OperandValueProperties Op2VP;
7104     TargetTransformInfo::OperandValueKind Op2VK =
7105         TTI.getOperandInfo(Op2, Op2VP);
7106     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7107       Op2VK = TargetTransformInfo::OK_UniformValue;
7108 
7109     SmallVector<const Value *, 4> Operands(I->operand_values());
7110     return TTI.getArithmeticInstrCost(
7111         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7112         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7113   }
7114   case Instruction::FNeg: {
7115     return TTI.getArithmeticInstrCost(
7116         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7117         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7118         TargetTransformInfo::OP_None, I->getOperand(0), I);
7119   }
7120   case Instruction::Select: {
7121     SelectInst *SI = cast<SelectInst>(I);
7122     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7123     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7124 
7125     const Value *Op0, *Op1;
7126     using namespace llvm::PatternMatch;
7127     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7128                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7129       // select x, y, false --> x & y
7130       // select x, true, y --> x | y
7131       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7132       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7133       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7134       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7135       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7136               Op1->getType()->getScalarSizeInBits() == 1);
7137 
7138       SmallVector<const Value *, 2> Operands{Op0, Op1};
7139       return TTI.getArithmeticInstrCost(
7140           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7141           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7142     }
7143 
7144     Type *CondTy = SI->getCondition()->getType();
7145     if (!ScalarCond)
7146       CondTy = VectorType::get(CondTy, VF);
7147 
7148     CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
7149     if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
7150       Pred = Cmp->getPredicate();
7151     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
7152                                   CostKind, I);
7153   }
7154   case Instruction::ICmp:
7155   case Instruction::FCmp: {
7156     Type *ValTy = I->getOperand(0)->getType();
7157     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7158     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7159       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7160     VectorTy = ToVectorTy(ValTy, VF);
7161     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7162                                   cast<CmpInst>(I)->getPredicate(), CostKind,
7163                                   I);
7164   }
7165   case Instruction::Store:
7166   case Instruction::Load: {
7167     ElementCount Width = VF;
7168     if (Width.isVector()) {
7169       InstWidening Decision = getWideningDecision(I, Width);
7170       assert(Decision != CM_Unknown &&
7171              "CM decision should be taken at this point");
7172       if (Decision == CM_Scalarize)
7173         Width = ElementCount::getFixed(1);
7174     }
7175     VectorTy = ToVectorTy(getLoadStoreType(I), Width);
7176     return getMemoryInstructionCost(I, VF);
7177   }
7178   case Instruction::BitCast:
7179     if (I->getType()->isPointerTy())
7180       return 0;
7181     LLVM_FALLTHROUGH;
7182   case Instruction::ZExt:
7183   case Instruction::SExt:
7184   case Instruction::FPToUI:
7185   case Instruction::FPToSI:
7186   case Instruction::FPExt:
7187   case Instruction::PtrToInt:
7188   case Instruction::IntToPtr:
7189   case Instruction::SIToFP:
7190   case Instruction::UIToFP:
7191   case Instruction::Trunc:
7192   case Instruction::FPTrunc: {
7193     // Computes the CastContextHint from a Load/Store instruction.
7194     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7195       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7196              "Expected a load or a store!");
7197 
7198       if (VF.isScalar() || !TheLoop->contains(I))
7199         return TTI::CastContextHint::Normal;
7200 
7201       switch (getWideningDecision(I, VF)) {
7202       case LoopVectorizationCostModel::CM_GatherScatter:
7203         return TTI::CastContextHint::GatherScatter;
7204       case LoopVectorizationCostModel::CM_Interleave:
7205         return TTI::CastContextHint::Interleave;
7206       case LoopVectorizationCostModel::CM_Scalarize:
7207       case LoopVectorizationCostModel::CM_Widen:
7208         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7209                                         : TTI::CastContextHint::Normal;
7210       case LoopVectorizationCostModel::CM_Widen_Reverse:
7211         return TTI::CastContextHint::Reversed;
7212       case LoopVectorizationCostModel::CM_Unknown:
7213         llvm_unreachable("Instr did not go through cost modelling?");
7214       }
7215 
7216       llvm_unreachable("Unhandled case!");
7217     };
7218 
7219     unsigned Opcode = I->getOpcode();
7220     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7221     // For Trunc, the context is the only user, which must be a StoreInst.
7222     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7223       if (I->hasOneUse())
7224         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7225           CCH = ComputeCCH(Store);
7226     }
7227     // For Z/Sext, the context is the operand, which must be a LoadInst.
7228     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7229              Opcode == Instruction::FPExt) {
7230       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7231         CCH = ComputeCCH(Load);
7232     }
7233 
7234     // We optimize the truncation of induction variables having constant
7235     // integer steps. The cost of these truncations is the same as the scalar
7236     // operation.
7237     if (isOptimizableIVTruncate(I, VF)) {
7238       auto *Trunc = cast<TruncInst>(I);
7239       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7240                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7241     }
7242 
7243     // Detect reduction patterns
7244     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7245       return *RedCost;
7246 
7247     Type *SrcScalarTy = I->getOperand(0)->getType();
7248     Type *SrcVecTy =
7249         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7250     if (canTruncateToMinimalBitwidth(I, VF)) {
7251       // This cast is going to be shrunk. This may remove the cast or it might
7252       // turn it into slightly different cast. For example, if MinBW == 16,
7253       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7254       //
7255       // Calculate the modified src and dest types.
7256       Type *MinVecTy = VectorTy;
7257       if (Opcode == Instruction::Trunc) {
7258         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7259         VectorTy =
7260             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7261       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7262         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7263         VectorTy =
7264             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7265       }
7266     }
7267 
7268     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7269   }
7270   case Instruction::Call: {
7271     if (RecurrenceDescriptor::isFMulAddIntrinsic(I))
7272       if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7273         return *RedCost;
7274     bool NeedToScalarize;
7275     CallInst *CI = cast<CallInst>(I);
7276     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7277     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7278       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7279       return std::min(CallCost, IntrinsicCost);
7280     }
7281     return CallCost;
7282   }
7283   case Instruction::ExtractValue:
7284     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7285   case Instruction::Alloca:
7286     // We cannot easily widen alloca to a scalable alloca, as
7287     // the result would need to be a vector of pointers.
7288     if (VF.isScalable())
7289       return InstructionCost::getInvalid();
7290     LLVM_FALLTHROUGH;
7291   default:
7292     // This opcode is unknown. Assume that it is the same as 'mul'.
7293     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7294   } // end of switch.
7295 }
7296 
7297 char LoopVectorize::ID = 0;
7298 
7299 static const char lv_name[] = "Loop Vectorization";
7300 
7301 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7302 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7303 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7304 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7305 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7306 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7307 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7308 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7309 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7310 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7311 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7312 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7313 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7314 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7315 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7316 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7317 
7318 namespace llvm {
7319 
7320 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7321 
7322 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7323                               bool VectorizeOnlyWhenForced) {
7324   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7325 }
7326 
7327 } // end namespace llvm
7328 
7329 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7330   // Check if the pointer operand of a load or store instruction is
7331   // consecutive.
7332   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7333     return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr);
7334   return false;
7335 }
7336 
7337 void LoopVectorizationCostModel::collectValuesToIgnore() {
7338   // Ignore ephemeral values.
7339   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7340 
7341   // Ignore type-promoting instructions we identified during reduction
7342   // detection.
7343   for (auto &Reduction : Legal->getReductionVars()) {
7344     const RecurrenceDescriptor &RedDes = Reduction.second;
7345     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7346     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7347   }
7348   // Ignore type-casting instructions we identified during induction
7349   // detection.
7350   for (auto &Induction : Legal->getInductionVars()) {
7351     const InductionDescriptor &IndDes = Induction.second;
7352     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7353     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7354   }
7355 }
7356 
7357 void LoopVectorizationCostModel::collectInLoopReductions() {
7358   for (auto &Reduction : Legal->getReductionVars()) {
7359     PHINode *Phi = Reduction.first;
7360     const RecurrenceDescriptor &RdxDesc = Reduction.second;
7361 
7362     // We don't collect reductions that are type promoted (yet).
7363     if (RdxDesc.getRecurrenceType() != Phi->getType())
7364       continue;
7365 
7366     // If the target would prefer this reduction to happen "in-loop", then we
7367     // want to record it as such.
7368     unsigned Opcode = RdxDesc.getOpcode();
7369     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7370         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7371                                    TargetTransformInfo::ReductionFlags()))
7372       continue;
7373 
7374     // Check that we can correctly put the reductions into the loop, by
7375     // finding the chain of operations that leads from the phi to the loop
7376     // exit value.
7377     SmallVector<Instruction *, 4> ReductionOperations =
7378         RdxDesc.getReductionOpChain(Phi, TheLoop);
7379     bool InLoop = !ReductionOperations.empty();
7380     if (InLoop) {
7381       InLoopReductionChains[Phi] = ReductionOperations;
7382       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7383       Instruction *LastChain = Phi;
7384       for (auto *I : ReductionOperations) {
7385         InLoopReductionImmediateChains[I] = LastChain;
7386         LastChain = I;
7387       }
7388     }
7389     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7390                       << " reduction for phi: " << *Phi << "\n");
7391   }
7392 }
7393 
7394 // TODO: we could return a pair of values that specify the max VF and
7395 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
7396 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
7397 // doesn't have a cost model that can choose which plan to execute if
7398 // more than one is generated.
7399 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7400                                  LoopVectorizationCostModel &CM) {
7401   unsigned WidestType;
7402   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7403   return WidestVectorRegBits / WidestType;
7404 }
7405 
7406 VectorizationFactor
7407 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7408   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7409   ElementCount VF = UserVF;
7410   // Outer loop handling: They may require CFG and instruction level
7411   // transformations before even evaluating whether vectorization is profitable.
7412   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7413   // the vectorization pipeline.
7414   if (!OrigLoop->isInnermost()) {
7415     // If the user doesn't provide a vectorization factor, determine a
7416     // reasonable one.
7417     if (UserVF.isZero()) {
7418       VF = ElementCount::getFixed(determineVPlanVF(
7419           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7420               .getFixedSize(),
7421           CM));
7422       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7423 
7424       // Make sure we have a VF > 1 for stress testing.
7425       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7426         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7427                           << "overriding computed VF.\n");
7428         VF = ElementCount::getFixed(4);
7429       }
7430     }
7431     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7432     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7433            "VF needs to be a power of two");
7434     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7435                       << "VF " << VF << " to build VPlans.\n");
7436     buildVPlans(VF, VF);
7437 
7438     // For VPlan build stress testing, we bail out after VPlan construction.
7439     if (VPlanBuildStressTest)
7440       return VectorizationFactor::Disabled();
7441 
7442     return {VF, 0 /*Cost*/};
7443   }
7444 
7445   LLVM_DEBUG(
7446       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7447                 "VPlan-native path.\n");
7448   return VectorizationFactor::Disabled();
7449 }
7450 
7451 Optional<VectorizationFactor>
7452 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7453   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7454   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
7455   if (!MaxFactors) // Cases that should not to be vectorized nor interleaved.
7456     return None;
7457 
7458   // Invalidate interleave groups if all blocks of loop will be predicated.
7459   if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
7460       !useMaskedInterleavedAccesses(*TTI)) {
7461     LLVM_DEBUG(
7462         dbgs()
7463         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7464            "which requires masked-interleaved support.\n");
7465     if (CM.InterleaveInfo.invalidateGroups())
7466       // Invalidating interleave groups also requires invalidating all decisions
7467       // based on them, which includes widening decisions and uniform and scalar
7468       // values.
7469       CM.invalidateCostModelingDecisions();
7470   }
7471 
7472   ElementCount MaxUserVF =
7473       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
7474   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
7475   if (!UserVF.isZero() && UserVFIsLegal) {
7476     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
7477            "VF needs to be a power of two");
7478     // Collect the instructions (and their associated costs) that will be more
7479     // profitable to scalarize.
7480     if (CM.selectUserVectorizationFactor(UserVF)) {
7481       LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
7482       CM.collectInLoopReductions();
7483       buildVPlansWithVPRecipes(UserVF, UserVF);
7484       LLVM_DEBUG(printPlans(dbgs()));
7485       return {{UserVF, 0}};
7486     } else
7487       reportVectorizationInfo("UserVF ignored because of invalid costs.",
7488                               "InvalidCost", ORE, OrigLoop);
7489   }
7490 
7491   // Populate the set of Vectorization Factor Candidates.
7492   ElementCountSet VFCandidates;
7493   for (auto VF = ElementCount::getFixed(1);
7494        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
7495     VFCandidates.insert(VF);
7496   for (auto VF = ElementCount::getScalable(1);
7497        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
7498     VFCandidates.insert(VF);
7499 
7500   for (const auto &VF : VFCandidates) {
7501     // Collect Uniform and Scalar instructions after vectorization with VF.
7502     CM.collectUniformsAndScalars(VF);
7503 
7504     // Collect the instructions (and their associated costs) that will be more
7505     // profitable to scalarize.
7506     if (VF.isVector())
7507       CM.collectInstsToScalarize(VF);
7508   }
7509 
7510   CM.collectInLoopReductions();
7511   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
7512   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
7513 
7514   LLVM_DEBUG(printPlans(dbgs()));
7515   if (!MaxFactors.hasVector())
7516     return VectorizationFactor::Disabled();
7517 
7518   // Select the optimal vectorization factor.
7519   auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);
7520 
7521   // Check if it is profitable to vectorize with runtime checks.
7522   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
7523   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
7524     bool PragmaThresholdReached =
7525         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
7526     bool ThresholdReached =
7527         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
7528     if ((ThresholdReached && !Hints.allowReordering()) ||
7529         PragmaThresholdReached) {
7530       ORE->emit([&]() {
7531         return OptimizationRemarkAnalysisAliasing(
7532                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
7533                    OrigLoop->getHeader())
7534                << "loop not vectorized: cannot prove it is safe to reorder "
7535                   "memory operations";
7536       });
7537       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
7538       Hints.emitRemarkWithHints();
7539       return VectorizationFactor::Disabled();
7540     }
7541   }
7542   return SelectedVF;
7543 }
7544 
7545 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
7546   assert(count_if(VPlans,
7547                   [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7548              1 &&
7549          "Best VF has not a single VPlan.");
7550 
7551   for (const VPlanPtr &Plan : VPlans) {
7552     if (Plan->hasVF(VF))
7553       return *Plan.get();
7554   }
7555   llvm_unreachable("No plan found!");
7556 }
7557 
7558 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7559   SmallVector<Metadata *, 4> MDs;
7560   // Reserve first location for self reference to the LoopID metadata node.
7561   MDs.push_back(nullptr);
7562   bool IsUnrollMetadata = false;
7563   MDNode *LoopID = L->getLoopID();
7564   if (LoopID) {
7565     // First find existing loop unrolling disable metadata.
7566     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7567       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7568       if (MD) {
7569         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7570         IsUnrollMetadata =
7571             S && S->getString().startswith("llvm.loop.unroll.disable");
7572       }
7573       MDs.push_back(LoopID->getOperand(i));
7574     }
7575   }
7576 
7577   if (!IsUnrollMetadata) {
7578     // Add runtime unroll disable metadata.
7579     LLVMContext &Context = L->getHeader()->getContext();
7580     SmallVector<Metadata *, 1> DisableOperands;
7581     DisableOperands.push_back(
7582         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7583     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7584     MDs.push_back(DisableNode);
7585     MDNode *NewLoopID = MDNode::get(Context, MDs);
7586     // Set operand 0 to refer to the loop id itself.
7587     NewLoopID->replaceOperandWith(0, NewLoopID);
7588     L->setLoopID(NewLoopID);
7589   }
7590 }
7591 
7592 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
7593                                            VPlan &BestVPlan,
7594                                            InnerLoopVectorizer &ILV,
7595                                            DominatorTree *DT) {
7596   LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF
7597                     << '\n');
7598 
7599   // Perform the actual loop transformation.
7600 
7601   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7602   VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
7603   Value *CanonicalIVStartValue;
7604   std::tie(State.CFG.VectorPreHeader, CanonicalIVStartValue) =
7605       ILV.createVectorizedLoopSkeleton();
7606   ILV.collectPoisonGeneratingRecipes(State);
7607 
7608   ILV.printDebugTracesAtStart();
7609 
7610   //===------------------------------------------------===//
7611   //
7612   // Notice: any optimization or new instruction that go
7613   // into the code below should also be implemented in
7614   // the cost-model.
7615   //
7616   //===------------------------------------------------===//
7617 
7618   // 2. Copy and widen instructions from the old loop into the new loop.
7619   BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
7620                              ILV.getOrCreateVectorTripCount(nullptr),
7621                              CanonicalIVStartValue, State);
7622   BestVPlan.execute(&State);
7623 
7624   // Keep all loop hints from the original loop on the vector loop (we'll
7625   // replace the vectorizer-specific hints below).
7626   MDNode *OrigLoopID = OrigLoop->getLoopID();
7627 
7628   Optional<MDNode *> VectorizedLoopID =
7629       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7630                                       LLVMLoopVectorizeFollowupVectorized});
7631 
7632   Loop *L = LI->getLoopFor(State.CFG.PrevBB);
7633   if (VectorizedLoopID.hasValue())
7634     L->setLoopID(VectorizedLoopID.getValue());
7635   else {
7636     // Keep all loop hints from the original loop on the vector loop (we'll
7637     // replace the vectorizer-specific hints below).
7638     if (MDNode *LID = OrigLoop->getLoopID())
7639       L->setLoopID(LID);
7640 
7641     LoopVectorizeHints Hints(L, true, *ORE);
7642     Hints.setAlreadyVectorized();
7643   }
7644   // Disable runtime unrolling when vectorizing the epilogue loop.
7645   if (CanonicalIVStartValue)
7646     AddRuntimeUnrollDisableMetaData(L);
7647 
7648   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7649   //    predication, updating analyses.
7650   ILV.fixVectorizedLoop(State);
7651 
7652   ILV.printDebugTracesAtEnd();
7653 }
7654 
7655 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
7656 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
7657   for (const auto &Plan : VPlans)
7658     if (PrintVPlansInDotFormat)
7659       Plan->printDOT(O);
7660     else
7661       Plan->print(O);
7662 }
7663 #endif
7664 
7665 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7666     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7667 
7668   // We create new control-flow for the vectorized loop, so the original exit
7669   // conditions will be dead after vectorization if it's only used by the
7670   // terminator
7671   SmallVector<BasicBlock*> ExitingBlocks;
7672   OrigLoop->getExitingBlocks(ExitingBlocks);
7673   for (auto *BB : ExitingBlocks) {
7674     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
7675     if (!Cmp || !Cmp->hasOneUse())
7676       continue;
7677 
7678     // TODO: we should introduce a getUniqueExitingBlocks on Loop
7679     if (!DeadInstructions.insert(Cmp).second)
7680       continue;
7681 
7682     // The operands of the icmp is often a dead trunc, used by IndUpdate.
7683     // TODO: can recurse through operands in general
7684     for (Value *Op : Cmp->operands()) {
7685       if (isa<TruncInst>(Op) && Op->hasOneUse())
7686           DeadInstructions.insert(cast<Instruction>(Op));
7687     }
7688   }
7689 
7690   // We create new "steps" for induction variable updates to which the original
7691   // induction variables map. An original update instruction will be dead if
7692   // all its users except the induction variable are dead.
7693   auto *Latch = OrigLoop->getLoopLatch();
7694   for (auto &Induction : Legal->getInductionVars()) {
7695     PHINode *Ind = Induction.first;
7696     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
7697 
7698     // If the tail is to be folded by masking, the primary induction variable,
7699     // if exists, isn't dead: it will be used for masking. Don't kill it.
7700     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
7701       continue;
7702 
7703     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
7704           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
7705         }))
7706       DeadInstructions.insert(IndUpdate);
7707   }
7708 }
7709 
7710 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
7711 
7712 //===--------------------------------------------------------------------===//
7713 // EpilogueVectorizerMainLoop
7714 //===--------------------------------------------------------------------===//
7715 
7716 /// This function is partially responsible for generating the control flow
7717 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7718 std::pair<BasicBlock *, Value *>
7719 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
7720   MDNode *OrigLoopID = OrigLoop->getLoopID();
7721 
7722   // Workaround!  Compute the trip count of the original loop and cache it
7723   // before we start modifying the CFG.  This code has a systemic problem
7724   // wherein it tries to run analysis over partially constructed IR; this is
7725   // wrong, and not simply for SCEV.  The trip count of the original loop
7726   // simply happens to be prone to hitting this in practice.  In theory, we
7727   // can hit the same issue for any SCEV, or ValueTracking query done during
7728   // mutation.  See PR49900.
7729   getOrCreateTripCount(OrigLoop->getLoopPreheader());
7730   createVectorLoopSkeleton("");
7731 
7732   // Generate the code to check the minimum iteration count of the vector
7733   // epilogue (see below).
7734   EPI.EpilogueIterationCountCheck =
7735       emitMinimumIterationCountCheck(LoopScalarPreHeader, true);
7736   EPI.EpilogueIterationCountCheck->setName("iter.check");
7737 
7738   // Generate the code to check any assumptions that we've made for SCEV
7739   // expressions.
7740   EPI.SCEVSafetyCheck = emitSCEVChecks(LoopScalarPreHeader);
7741 
7742   // Generate the code that checks at runtime if arrays overlap. We put the
7743   // checks into a separate block to make the more common case of few elements
7744   // faster.
7745   EPI.MemSafetyCheck = emitMemRuntimeChecks(LoopScalarPreHeader);
7746 
7747   // Generate the iteration count check for the main loop, *after* the check
7748   // for the epilogue loop, so that the path-length is shorter for the case
7749   // that goes directly through the vector epilogue. The longer-path length for
7750   // the main loop is compensated for, by the gain from vectorizing the larger
7751   // trip count. Note: the branch will get updated later on when we vectorize
7752   // the epilogue.
7753   EPI.MainLoopIterationCountCheck =
7754       emitMinimumIterationCountCheck(LoopScalarPreHeader, false);
7755 
7756   // Generate the induction variable.
7757   Value *CountRoundDown = getOrCreateVectorTripCount(LoopVectorPreHeader);
7758   EPI.VectorTripCount = CountRoundDown;
7759 
7760   // Skip induction resume value creation here because they will be created in
7761   // the second pass. If we created them here, they wouldn't be used anyway,
7762   // because the vplan in the second pass still contains the inductions from the
7763   // original loop.
7764 
7765   return {completeLoopSkeleton(OrigLoopID), nullptr};
7766 }
7767 
7768 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
7769   LLVM_DEBUG({
7770     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7771            << "Main Loop VF:" << EPI.MainLoopVF
7772            << ", Main Loop UF:" << EPI.MainLoopUF
7773            << ", Epilogue Loop VF:" << EPI.EpilogueVF
7774            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7775   });
7776 }
7777 
7778 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
7779   DEBUG_WITH_TYPE(VerboseDebug, {
7780     dbgs() << "intermediate fn:\n"
7781            << *OrigLoop->getHeader()->getParent() << "\n";
7782   });
7783 }
7784 
7785 BasicBlock *
7786 EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(BasicBlock *Bypass,
7787                                                            bool ForEpilogue) {
7788   assert(Bypass && "Expected valid bypass basic block.");
7789   ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF;
7790   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
7791   Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
7792   // Reuse existing vector loop preheader for TC checks.
7793   // Note that new preheader block is generated for vector loop.
7794   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
7795   IRBuilder<> Builder(TCCheckBlock->getTerminator());
7796 
7797   // Generate code to check if the loop's trip count is less than VF * UF of the
7798   // main vector loop.
7799   auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ?
7800       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
7801 
7802   Value *CheckMinIters = Builder.CreateICmp(
7803       P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor),
7804       "min.iters.check");
7805 
7806   if (!ForEpilogue)
7807     TCCheckBlock->setName("vector.main.loop.iter.check");
7808 
7809   // Create new preheader for vector loop.
7810   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
7811                                    DT, LI, nullptr, "vector.ph");
7812 
7813   if (ForEpilogue) {
7814     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
7815                                  DT->getNode(Bypass)->getIDom()) &&
7816            "TC check is expected to dominate Bypass");
7817 
7818     // Update dominator for Bypass & LoopExit.
7819     DT->changeImmediateDominator(Bypass, TCCheckBlock);
7820     if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
7821       // For loops with multiple exits, there's no edge from the middle block
7822       // to exit blocks (as the epilogue must run) and thus no need to update
7823       // the immediate dominator of the exit blocks.
7824       DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
7825 
7826     LoopBypassBlocks.push_back(TCCheckBlock);
7827 
7828     // Save the trip count so we don't have to regenerate it in the
7829     // vec.epilog.iter.check. This is safe to do because the trip count
7830     // generated here dominates the vector epilog iter check.
7831     EPI.TripCount = Count;
7832   }
7833 
7834   ReplaceInstWithInst(
7835       TCCheckBlock->getTerminator(),
7836       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
7837 
7838   return TCCheckBlock;
7839 }
7840 
7841 //===--------------------------------------------------------------------===//
7842 // EpilogueVectorizerEpilogueLoop
7843 //===--------------------------------------------------------------------===//
7844 
7845 /// This function is partially responsible for generating the control flow
7846 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7847 std::pair<BasicBlock *, Value *>
7848 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
7849   MDNode *OrigLoopID = OrigLoop->getLoopID();
7850   createVectorLoopSkeleton("vec.epilog.");
7851 
7852   // Now, compare the remaining count and if there aren't enough iterations to
7853   // execute the vectorized epilogue skip to the scalar part.
7854   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
7855   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
7856   LoopVectorPreHeader =
7857       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
7858                  LI, nullptr, "vec.epilog.ph");
7859   emitMinimumVectorEpilogueIterCountCheck(LoopScalarPreHeader,
7860                                           VecEpilogueIterationCountCheck);
7861 
7862   // Adjust the control flow taking the state info from the main loop
7863   // vectorization into account.
7864   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
7865          "expected this to be saved from the previous pass.");
7866   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
7867       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
7868 
7869   DT->changeImmediateDominator(LoopVectorPreHeader,
7870                                EPI.MainLoopIterationCountCheck);
7871 
7872   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
7873       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
7874 
7875   if (EPI.SCEVSafetyCheck)
7876     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
7877         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
7878   if (EPI.MemSafetyCheck)
7879     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
7880         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
7881 
7882   DT->changeImmediateDominator(
7883       VecEpilogueIterationCountCheck,
7884       VecEpilogueIterationCountCheck->getSinglePredecessor());
7885 
7886   DT->changeImmediateDominator(LoopScalarPreHeader,
7887                                EPI.EpilogueIterationCountCheck);
7888   if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
7889     // If there is an epilogue which must run, there's no edge from the
7890     // middle block to exit blocks  and thus no need to update the immediate
7891     // dominator of the exit blocks.
7892     DT->changeImmediateDominator(LoopExitBlock,
7893                                  EPI.EpilogueIterationCountCheck);
7894 
7895   // Keep track of bypass blocks, as they feed start values to the induction
7896   // phis in the scalar loop preheader.
7897   if (EPI.SCEVSafetyCheck)
7898     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
7899   if (EPI.MemSafetyCheck)
7900     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
7901   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
7902 
7903   // The vec.epilog.iter.check block may contain Phi nodes from reductions which
7904   // merge control-flow from the latch block and the middle block. Update the
7905   // incoming values here and move the Phi into the preheader.
7906   SmallVector<PHINode *, 4> PhisInBlock;
7907   for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
7908     PhisInBlock.push_back(&Phi);
7909 
7910   for (PHINode *Phi : PhisInBlock) {
7911     Phi->replaceIncomingBlockWith(
7912         VecEpilogueIterationCountCheck->getSinglePredecessor(),
7913         VecEpilogueIterationCountCheck);
7914     Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
7915     if (EPI.SCEVSafetyCheck)
7916       Phi->removeIncomingValue(EPI.SCEVSafetyCheck);
7917     if (EPI.MemSafetyCheck)
7918       Phi->removeIncomingValue(EPI.MemSafetyCheck);
7919     Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI());
7920   }
7921 
7922   // Generate a resume induction for the vector epilogue and put it in the
7923   // vector epilogue preheader
7924   Type *IdxTy = Legal->getWidestInductionType();
7925   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
7926                                          LoopVectorPreHeader->getFirstNonPHI());
7927   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
7928   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
7929                            EPI.MainLoopIterationCountCheck);
7930 
7931   // Generate induction resume values. These variables save the new starting
7932   // indexes for the scalar loop. They are used to test if there are any tail
7933   // iterations left once the vector loop has completed.
7934   // Note that when the vectorized epilogue is skipped due to iteration count
7935   // check, then the resume value for the induction variable comes from
7936   // the trip count of the main vector loop, hence passing the AdditionalBypass
7937   // argument.
7938   createInductionResumeValues({VecEpilogueIterationCountCheck,
7939                                EPI.VectorTripCount} /* AdditionalBypass */);
7940 
7941   return {completeLoopSkeleton(OrigLoopID), EPResumeVal};
7942 }
7943 
7944 BasicBlock *
7945 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
7946     BasicBlock *Bypass, BasicBlock *Insert) {
7947 
7948   assert(EPI.TripCount &&
7949          "Expected trip count to have been safed in the first pass.");
7950   assert(
7951       (!isa<Instruction>(EPI.TripCount) ||
7952        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
7953       "saved trip count does not dominate insertion point.");
7954   Value *TC = EPI.TripCount;
7955   IRBuilder<> Builder(Insert->getTerminator());
7956   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
7957 
7958   // Generate code to check if the loop's trip count is less than VF * UF of the
7959   // vector epilogue loop.
7960   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
7961       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
7962 
7963   Value *CheckMinIters =
7964       Builder.CreateICmp(P, Count,
7965                          createStepForVF(Builder, Count->getType(),
7966                                          EPI.EpilogueVF, EPI.EpilogueUF),
7967                          "min.epilog.iters.check");
7968 
7969   ReplaceInstWithInst(
7970       Insert->getTerminator(),
7971       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
7972 
7973   LoopBypassBlocks.push_back(Insert);
7974   return Insert;
7975 }
7976 
7977 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
7978   LLVM_DEBUG({
7979     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
7980            << "Epilogue Loop VF:" << EPI.EpilogueVF
7981            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7982   });
7983 }
7984 
7985 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
7986   DEBUG_WITH_TYPE(VerboseDebug, {
7987     dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
7988   });
7989 }
7990 
7991 bool LoopVectorizationPlanner::getDecisionAndClampRange(
7992     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
7993   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
7994   bool PredicateAtRangeStart = Predicate(Range.Start);
7995 
7996   for (ElementCount TmpVF = Range.Start * 2;
7997        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
7998     if (Predicate(TmpVF) != PredicateAtRangeStart) {
7999       Range.End = TmpVF;
8000       break;
8001     }
8002 
8003   return PredicateAtRangeStart;
8004 }
8005 
8006 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8007 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8008 /// of VF's starting at a given VF and extending it as much as possible. Each
8009 /// vectorization decision can potentially shorten this sub-range during
8010 /// buildVPlan().
8011 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8012                                            ElementCount MaxVF) {
8013   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8014   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8015     VFRange SubRange = {VF, MaxVFPlusOne};
8016     VPlans.push_back(buildVPlan(SubRange));
8017     VF = SubRange.End;
8018   }
8019 }
8020 
8021 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8022                                          VPlanPtr &Plan) {
8023   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8024 
8025   // Look for cached value.
8026   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8027   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8028   if (ECEntryIt != EdgeMaskCache.end())
8029     return ECEntryIt->second;
8030 
8031   VPValue *SrcMask = createBlockInMask(Src, Plan);
8032 
8033   // The terminator has to be a branch inst!
8034   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8035   assert(BI && "Unexpected terminator found");
8036 
8037   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8038     return EdgeMaskCache[Edge] = SrcMask;
8039 
8040   // If source is an exiting block, we know the exit edge is dynamically dead
8041   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8042   // adding uses of an otherwise potentially dead instruction.
8043   if (OrigLoop->isLoopExiting(Src))
8044     return EdgeMaskCache[Edge] = SrcMask;
8045 
8046   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8047   assert(EdgeMask && "No Edge Mask found for condition");
8048 
8049   if (BI->getSuccessor(0) != Dst)
8050     EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc());
8051 
8052   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8053     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8054     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8055     // The select version does not introduce new UB if SrcMask is false and
8056     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
8057     VPValue *False = Plan->getOrAddVPValue(
8058         ConstantInt::getFalse(BI->getCondition()->getType()));
8059     EdgeMask =
8060         Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc());
8061   }
8062 
8063   return EdgeMaskCache[Edge] = EdgeMask;
8064 }
8065 
8066 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8067   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8068 
8069   // Look for cached value.
8070   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8071   if (BCEntryIt != BlockMaskCache.end())
8072     return BCEntryIt->second;
8073 
8074   // All-one mask is modelled as no-mask following the convention for masked
8075   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8076   VPValue *BlockMask = nullptr;
8077 
8078   if (OrigLoop->getHeader() == BB) {
8079     if (!CM.blockNeedsPredicationForAnyReason(BB))
8080       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8081 
8082     // Introduce the early-exit compare IV <= BTC to form header block mask.
8083     // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by
8084     // constructing the desired canonical IV in the header block as its first
8085     // non-phi instructions.
8086     assert(CM.foldTailByMasking() && "must fold the tail");
8087     VPBasicBlock *HeaderVPBB =
8088         Plan->getVectorLoopRegion()->getEntryBasicBlock();
8089     auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi();
8090     auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV());
8091     HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi());
8092 
8093     VPBuilder::InsertPointGuard Guard(Builder);
8094     Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint);
8095     if (CM.TTI.emitGetActiveLaneMask()) {
8096       VPValue *TC = Plan->getOrCreateTripCount();
8097       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC});
8098     } else {
8099       VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8100       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8101     }
8102     return BlockMaskCache[BB] = BlockMask;
8103   }
8104 
8105   // This is the block mask. We OR all incoming edges.
8106   for (auto *Predecessor : predecessors(BB)) {
8107     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8108     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8109       return BlockMaskCache[BB] = EdgeMask;
8110 
8111     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8112       BlockMask = EdgeMask;
8113       continue;
8114     }
8115 
8116     BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
8117   }
8118 
8119   return BlockMaskCache[BB] = BlockMask;
8120 }
8121 
8122 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8123                                                 ArrayRef<VPValue *> Operands,
8124                                                 VFRange &Range,
8125                                                 VPlanPtr &Plan) {
8126   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8127          "Must be called with either a load or store");
8128 
8129   auto willWiden = [&](ElementCount VF) -> bool {
8130     if (VF.isScalar())
8131       return false;
8132     LoopVectorizationCostModel::InstWidening Decision =
8133         CM.getWideningDecision(I, VF);
8134     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8135            "CM decision should be taken at this point.");
8136     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8137       return true;
8138     if (CM.isScalarAfterVectorization(I, VF) ||
8139         CM.isProfitableToScalarize(I, VF))
8140       return false;
8141     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8142   };
8143 
8144   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8145     return nullptr;
8146 
8147   VPValue *Mask = nullptr;
8148   if (Legal->isMaskRequired(I))
8149     Mask = createBlockInMask(I->getParent(), Plan);
8150 
8151   // Determine if the pointer operand of the access is either consecutive or
8152   // reverse consecutive.
8153   LoopVectorizationCostModel::InstWidening Decision =
8154       CM.getWideningDecision(I, Range.Start);
8155   bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
8156   bool Consecutive =
8157       Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
8158 
8159   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8160     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask,
8161                                               Consecutive, Reverse);
8162 
8163   StoreInst *Store = cast<StoreInst>(I);
8164   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8165                                             Mask, Consecutive, Reverse);
8166 }
8167 
8168 static VPWidenIntOrFpInductionRecipe *
8169 createWidenInductionRecipe(PHINode *Phi, Instruction *PhiOrTrunc,
8170                            VPValue *Start, const InductionDescriptor &IndDesc,
8171                            LoopVectorizationCostModel &CM, ScalarEvolution &SE,
8172                            Loop &OrigLoop, VFRange &Range) {
8173   // Returns true if an instruction \p I should be scalarized instead of
8174   // vectorized for the chosen vectorization factor.
8175   auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) {
8176     return CM.isScalarAfterVectorization(I, VF) ||
8177            CM.isProfitableToScalarize(I, VF);
8178   };
8179 
8180   bool NeedsScalarIV = LoopVectorizationPlanner::getDecisionAndClampRange(
8181       [&](ElementCount VF) {
8182         // Returns true if we should generate a scalar version of \p IV.
8183         if (ShouldScalarizeInstruction(PhiOrTrunc, VF))
8184           return true;
8185         auto isScalarInst = [&](User *U) -> bool {
8186           auto *I = cast<Instruction>(U);
8187           return OrigLoop.contains(I) && ShouldScalarizeInstruction(I, VF);
8188         };
8189         return any_of(PhiOrTrunc->users(), isScalarInst);
8190       },
8191       Range);
8192   bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange(
8193       [&](ElementCount VF) {
8194         return ShouldScalarizeInstruction(PhiOrTrunc, VF);
8195       },
8196       Range);
8197   assert(IndDesc.getStartValue() ==
8198          Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader()));
8199   assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) &&
8200          "step must be loop invariant");
8201   if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) {
8202     return new VPWidenIntOrFpInductionRecipe(
8203         Phi, Start, IndDesc, TruncI, NeedsScalarIV, !NeedsScalarIVOnly, SE);
8204   }
8205   assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here");
8206   return new VPWidenIntOrFpInductionRecipe(Phi, Start, IndDesc, NeedsScalarIV,
8207                                            !NeedsScalarIVOnly, SE);
8208 }
8209 
8210 VPRecipeBase *VPRecipeBuilder::tryToOptimizeInductionPHI(
8211     PHINode *Phi, ArrayRef<VPValue *> Operands, VFRange &Range) const {
8212 
8213   // Check if this is an integer or fp induction. If so, build the recipe that
8214   // produces its scalar and vector values.
8215   if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi))
8216     return createWidenInductionRecipe(Phi, Phi, Operands[0], *II, CM,
8217                                       *PSE.getSE(), *OrigLoop, Range);
8218 
8219   // Check if this is pointer induction. If so, build the recipe for it.
8220   if (auto *II = Legal->getPointerInductionDescriptor(Phi))
8221     return new VPWidenPointerInductionRecipe(Phi, Operands[0], *II,
8222                                              *PSE.getSE());
8223   return nullptr;
8224 }
8225 
8226 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8227     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8228     VPlan &Plan) const {
8229   // Optimize the special case where the source is a constant integer
8230   // induction variable. Notice that we can only optimize the 'trunc' case
8231   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8232   // (c) other casts depend on pointer size.
8233 
8234   // Determine whether \p K is a truncation based on an induction variable that
8235   // can be optimized.
8236   auto isOptimizableIVTruncate =
8237       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8238     return [=](ElementCount VF) -> bool {
8239       return CM.isOptimizableIVTruncate(K, VF);
8240     };
8241   };
8242 
8243   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8244           isOptimizableIVTruncate(I), Range)) {
8245 
8246     auto *Phi = cast<PHINode>(I->getOperand(0));
8247     const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
8248     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8249     return createWidenInductionRecipe(Phi, I, Start, II, CM, *PSE.getSE(),
8250                                       *OrigLoop, Range);
8251   }
8252   return nullptr;
8253 }
8254 
8255 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8256                                                 ArrayRef<VPValue *> Operands,
8257                                                 VPlanPtr &Plan) {
8258   // If all incoming values are equal, the incoming VPValue can be used directly
8259   // instead of creating a new VPBlendRecipe.
8260   VPValue *FirstIncoming = Operands[0];
8261   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8262         return FirstIncoming == Inc;
8263       })) {
8264     return Operands[0];
8265   }
8266 
8267   unsigned NumIncoming = Phi->getNumIncomingValues();
8268   // For in-loop reductions, we do not need to create an additional select.
8269   VPValue *InLoopVal = nullptr;
8270   for (unsigned In = 0; In < NumIncoming; In++) {
8271     PHINode *PhiOp =
8272         dyn_cast_or_null<PHINode>(Operands[In]->getUnderlyingValue());
8273     if (PhiOp && CM.isInLoopReduction(PhiOp)) {
8274       assert(!InLoopVal && "Found more than one in-loop reduction!");
8275       InLoopVal = Operands[In];
8276     }
8277   }
8278 
8279   assert((!InLoopVal || NumIncoming == 2) &&
8280          "Found an in-loop reduction for PHI with unexpected number of "
8281          "incoming values");
8282   if (InLoopVal)
8283     return Operands[Operands[0] == InLoopVal ? 1 : 0];
8284 
8285   // We know that all PHIs in non-header blocks are converted into selects, so
8286   // we don't have to worry about the insertion order and we can just use the
8287   // builder. At this point we generate the predication tree. There may be
8288   // duplications since this is a simple recursive scan, but future
8289   // optimizations will clean it up.
8290   SmallVector<VPValue *, 2> OperandsWithMask;
8291 
8292   for (unsigned In = 0; In < NumIncoming; In++) {
8293     VPValue *EdgeMask =
8294       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8295     assert((EdgeMask || NumIncoming == 1) &&
8296            "Multiple predecessors with one having a full mask");
8297     OperandsWithMask.push_back(Operands[In]);
8298     if (EdgeMask)
8299       OperandsWithMask.push_back(EdgeMask);
8300   }
8301   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8302 }
8303 
8304 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8305                                                    ArrayRef<VPValue *> Operands,
8306                                                    VFRange &Range) const {
8307 
8308   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8309       [this, CI](ElementCount VF) {
8310         return CM.isScalarWithPredication(CI, VF);
8311       },
8312       Range);
8313 
8314   if (IsPredicated)
8315     return nullptr;
8316 
8317   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8318   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8319              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8320              ID == Intrinsic::pseudoprobe ||
8321              ID == Intrinsic::experimental_noalias_scope_decl))
8322     return nullptr;
8323 
8324   auto willWiden = [&](ElementCount VF) -> bool {
8325     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8326     // The following case may be scalarized depending on the VF.
8327     // The flag shows whether we use Intrinsic or a usual Call for vectorized
8328     // version of the instruction.
8329     // Is it beneficial to perform intrinsic call compared to lib call?
8330     bool NeedToScalarize = false;
8331     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8332     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8333     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8334     return UseVectorIntrinsic || !NeedToScalarize;
8335   };
8336 
8337   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8338     return nullptr;
8339 
8340   ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
8341   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8342 }
8343 
8344 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8345   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8346          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8347   // Instruction should be widened, unless it is scalar after vectorization,
8348   // scalarization is profitable or it is predicated.
8349   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8350     return CM.isScalarAfterVectorization(I, VF) ||
8351            CM.isProfitableToScalarize(I, VF) ||
8352            CM.isScalarWithPredication(I, VF);
8353   };
8354   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8355                                                              Range);
8356 }
8357 
8358 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8359                                            ArrayRef<VPValue *> Operands) const {
8360   auto IsVectorizableOpcode = [](unsigned Opcode) {
8361     switch (Opcode) {
8362     case Instruction::Add:
8363     case Instruction::And:
8364     case Instruction::AShr:
8365     case Instruction::BitCast:
8366     case Instruction::FAdd:
8367     case Instruction::FCmp:
8368     case Instruction::FDiv:
8369     case Instruction::FMul:
8370     case Instruction::FNeg:
8371     case Instruction::FPExt:
8372     case Instruction::FPToSI:
8373     case Instruction::FPToUI:
8374     case Instruction::FPTrunc:
8375     case Instruction::FRem:
8376     case Instruction::FSub:
8377     case Instruction::ICmp:
8378     case Instruction::IntToPtr:
8379     case Instruction::LShr:
8380     case Instruction::Mul:
8381     case Instruction::Or:
8382     case Instruction::PtrToInt:
8383     case Instruction::SDiv:
8384     case Instruction::Select:
8385     case Instruction::SExt:
8386     case Instruction::Shl:
8387     case Instruction::SIToFP:
8388     case Instruction::SRem:
8389     case Instruction::Sub:
8390     case Instruction::Trunc:
8391     case Instruction::UDiv:
8392     case Instruction::UIToFP:
8393     case Instruction::URem:
8394     case Instruction::Xor:
8395     case Instruction::ZExt:
8396       return true;
8397     }
8398     return false;
8399   };
8400 
8401   if (!IsVectorizableOpcode(I->getOpcode()))
8402     return nullptr;
8403 
8404   // Success: widen this instruction.
8405   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8406 }
8407 
8408 void VPRecipeBuilder::fixHeaderPhis() {
8409   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8410   for (VPHeaderPHIRecipe *R : PhisToFix) {
8411     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8412     VPRecipeBase *IncR =
8413         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8414     R->addOperand(IncR->getVPSingleValue());
8415   }
8416 }
8417 
8418 VPBasicBlock *VPRecipeBuilder::handleReplication(
8419     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8420     VPlanPtr &Plan) {
8421   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8422       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8423       Range);
8424 
8425   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8426       [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); },
8427       Range);
8428 
8429   // Even if the instruction is not marked as uniform, there are certain
8430   // intrinsic calls that can be effectively treated as such, so we check for
8431   // them here. Conservatively, we only do this for scalable vectors, since
8432   // for fixed-width VFs we can always fall back on full scalarization.
8433   if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8434     switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
8435     case Intrinsic::assume:
8436     case Intrinsic::lifetime_start:
8437     case Intrinsic::lifetime_end:
8438       // For scalable vectors if one of the operands is variant then we still
8439       // want to mark as uniform, which will generate one instruction for just
8440       // the first lane of the vector. We can't scalarize the call in the same
8441       // way as for fixed-width vectors because we don't know how many lanes
8442       // there are.
8443       //
8444       // The reasons for doing it this way for scalable vectors are:
8445       //   1. For the assume intrinsic generating the instruction for the first
8446       //      lane is still be better than not generating any at all. For
8447       //      example, the input may be a splat across all lanes.
8448       //   2. For the lifetime start/end intrinsics the pointer operand only
8449       //      does anything useful when the input comes from a stack object,
8450       //      which suggests it should always be uniform. For non-stack objects
8451       //      the effect is to poison the object, which still allows us to
8452       //      remove the call.
8453       IsUniform = true;
8454       break;
8455     default:
8456       break;
8457     }
8458   }
8459 
8460   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8461                                        IsUniform, IsPredicated);
8462   setRecipe(I, Recipe);
8463   Plan->addVPValue(I, Recipe);
8464 
8465   // Find if I uses a predicated instruction. If so, it will use its scalar
8466   // value. Avoid hoisting the insert-element which packs the scalar value into
8467   // a vector value, as that happens iff all users use the vector value.
8468   for (VPValue *Op : Recipe->operands()) {
8469     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8470     if (!PredR)
8471       continue;
8472     auto *RepR =
8473         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8474     assert(RepR->isPredicated() &&
8475            "expected Replicate recipe to be predicated");
8476     RepR->setAlsoPack(false);
8477   }
8478 
8479   // Finalize the recipe for Instr, first if it is not predicated.
8480   if (!IsPredicated) {
8481     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8482     VPBB->appendRecipe(Recipe);
8483     return VPBB;
8484   }
8485   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8486 
8487   VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
8488   assert(SingleSucc && "VPBB must have a single successor when handling "
8489                        "predicated replication.");
8490   VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
8491   // Record predicated instructions for above packing optimizations.
8492   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8493   VPBlockUtils::insertBlockAfter(Region, VPBB);
8494   auto *RegSucc = new VPBasicBlock();
8495   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8496   VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
8497   return RegSucc;
8498 }
8499 
8500 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8501                                                       VPRecipeBase *PredRecipe,
8502                                                       VPlanPtr &Plan) {
8503   // Instructions marked for predication are replicated and placed under an
8504   // if-then construct to prevent side-effects.
8505 
8506   // Generate recipes to compute the block mask for this region.
8507   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8508 
8509   // Build the triangular if-then region.
8510   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8511   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8512   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8513   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8514   auto *PHIRecipe = Instr->getType()->isVoidTy()
8515                         ? nullptr
8516                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8517   if (PHIRecipe) {
8518     Plan->removeVPValueFor(Instr);
8519     Plan->addVPValue(Instr, PHIRecipe);
8520   }
8521   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8522   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8523   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8524 
8525   // Note: first set Entry as region entry and then connect successors starting
8526   // from it in order, to propagate the "parent" of each VPBasicBlock.
8527   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8528   VPBlockUtils::connectBlocks(Pred, Exit);
8529 
8530   return Region;
8531 }
8532 
8533 VPRecipeOrVPValueTy
8534 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8535                                         ArrayRef<VPValue *> Operands,
8536                                         VFRange &Range, VPlanPtr &Plan) {
8537   // First, check for specific widening recipes that deal with calls, memory
8538   // operations, inductions and Phi nodes.
8539   if (auto *CI = dyn_cast<CallInst>(Instr))
8540     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8541 
8542   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8543     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8544 
8545   VPRecipeBase *Recipe;
8546   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8547     if (Phi->getParent() != OrigLoop->getHeader())
8548       return tryToBlend(Phi, Operands, Plan);
8549     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range)))
8550       return toVPRecipeResult(Recipe);
8551 
8552     VPHeaderPHIRecipe *PhiRecipe = nullptr;
8553     if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
8554       VPValue *StartV = Operands[0];
8555       if (Legal->isReductionVariable(Phi)) {
8556         const RecurrenceDescriptor &RdxDesc =
8557             Legal->getReductionVars().find(Phi)->second;
8558         assert(RdxDesc.getRecurrenceStartValue() ==
8559                Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8560         PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
8561                                              CM.isInLoopReduction(Phi),
8562                                              CM.useOrderedReductions(RdxDesc));
8563       } else {
8564         PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
8565       }
8566 
8567       // Record the incoming value from the backedge, so we can add the incoming
8568       // value from the backedge after all recipes have been created.
8569       recordRecipeOf(cast<Instruction>(
8570           Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
8571       PhisToFix.push_back(PhiRecipe);
8572     } else {
8573       // TODO: record backedge value for remaining pointer induction phis.
8574       assert(Phi->getType()->isPointerTy() &&
8575              "only pointer phis should be handled here");
8576       assert(Legal->getInductionVars().count(Phi) &&
8577              "Not an induction variable");
8578       InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8579       VPValue *Start = Plan->getOrAddVPValue(II.getStartValue());
8580       PhiRecipe = new VPWidenPHIRecipe(Phi, Start);
8581     }
8582 
8583     return toVPRecipeResult(PhiRecipe);
8584   }
8585 
8586   if (isa<TruncInst>(Instr) &&
8587       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8588                                                Range, *Plan)))
8589     return toVPRecipeResult(Recipe);
8590 
8591   if (!shouldWiden(Instr, Range))
8592     return nullptr;
8593 
8594   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8595     return toVPRecipeResult(new VPWidenGEPRecipe(
8596         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8597 
8598   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8599     bool InvariantCond =
8600         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8601     return toVPRecipeResult(new VPWidenSelectRecipe(
8602         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8603   }
8604 
8605   return toVPRecipeResult(tryToWiden(Instr, Operands));
8606 }
8607 
8608 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8609                                                         ElementCount MaxVF) {
8610   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8611 
8612   // Collect instructions from the original loop that will become trivially dead
8613   // in the vectorized loop. We don't need to vectorize these instructions. For
8614   // example, original induction update instructions can become dead because we
8615   // separately emit induction "steps" when generating code for the new loop.
8616   // Similarly, we create a new latch condition when setting up the structure
8617   // of the new loop, so the old one can become dead.
8618   SmallPtrSet<Instruction *, 4> DeadInstructions;
8619   collectTriviallyDeadInstructions(DeadInstructions);
8620 
8621   // Add assume instructions we need to drop to DeadInstructions, to prevent
8622   // them from being added to the VPlan.
8623   // TODO: We only need to drop assumes in blocks that get flattend. If the
8624   // control flow is preserved, we should keep them.
8625   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8626   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8627 
8628   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8629   // Dead instructions do not need sinking. Remove them from SinkAfter.
8630   for (Instruction *I : DeadInstructions)
8631     SinkAfter.erase(I);
8632 
8633   // Cannot sink instructions after dead instructions (there won't be any
8634   // recipes for them). Instead, find the first non-dead previous instruction.
8635   for (auto &P : Legal->getSinkAfter()) {
8636     Instruction *SinkTarget = P.second;
8637     Instruction *FirstInst = &*SinkTarget->getParent()->begin();
8638     (void)FirstInst;
8639     while (DeadInstructions.contains(SinkTarget)) {
8640       assert(
8641           SinkTarget != FirstInst &&
8642           "Must find a live instruction (at least the one feeding the "
8643           "first-order recurrence PHI) before reaching beginning of the block");
8644       SinkTarget = SinkTarget->getPrevNode();
8645       assert(SinkTarget != P.first &&
8646              "sink source equals target, no sinking required");
8647     }
8648     P.second = SinkTarget;
8649   }
8650 
8651   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8652   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8653     VFRange SubRange = {VF, MaxVFPlusOne};
8654     VPlans.push_back(
8655         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8656     VF = SubRange.End;
8657   }
8658 }
8659 
8660 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a
8661 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF and a
8662 // BranchOnCount VPInstruction to the latch.
8663 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
8664                                   bool HasNUW, bool IsVPlanNative) {
8665   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8666   auto *StartV = Plan.getOrAddVPValue(StartIdx);
8667 
8668   auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
8669   VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
8670   VPBasicBlock *Header = TopRegion->getEntryBasicBlock();
8671   if (IsVPlanNative)
8672     Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
8673   Header->insert(CanonicalIVPHI, Header->begin());
8674 
8675   auto *CanonicalIVIncrement =
8676       new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW
8677                                : VPInstruction::CanonicalIVIncrement,
8678                         {CanonicalIVPHI}, DL);
8679   CanonicalIVPHI->addOperand(CanonicalIVIncrement);
8680 
8681   VPBasicBlock *EB = TopRegion->getExitBasicBlock();
8682   if (IsVPlanNative) {
8683     EB = cast<VPBasicBlock>(EB->getSinglePredecessor());
8684     EB->setCondBit(nullptr);
8685   }
8686   EB->appendRecipe(CanonicalIVIncrement);
8687 
8688   auto *BranchOnCount =
8689       new VPInstruction(VPInstruction::BranchOnCount,
8690                         {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL);
8691   EB->appendRecipe(BranchOnCount);
8692 }
8693 
8694 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8695     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8696     const MapVector<Instruction *, Instruction *> &SinkAfter) {
8697 
8698   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8699 
8700   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8701 
8702   // ---------------------------------------------------------------------------
8703   // Pre-construction: record ingredients whose recipes we'll need to further
8704   // process after constructing the initial VPlan.
8705   // ---------------------------------------------------------------------------
8706 
8707   // Mark instructions we'll need to sink later and their targets as
8708   // ingredients whose recipe we'll need to record.
8709   for (auto &Entry : SinkAfter) {
8710     RecipeBuilder.recordRecipeOf(Entry.first);
8711     RecipeBuilder.recordRecipeOf(Entry.second);
8712   }
8713   for (auto &Reduction : CM.getInLoopReductionChains()) {
8714     PHINode *Phi = Reduction.first;
8715     RecurKind Kind =
8716         Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
8717     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8718 
8719     RecipeBuilder.recordRecipeOf(Phi);
8720     for (auto &R : ReductionOperations) {
8721       RecipeBuilder.recordRecipeOf(R);
8722       // For min/max reductions, where we have a pair of icmp/select, we also
8723       // need to record the ICmp recipe, so it can be removed later.
8724       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
8725              "Only min/max recurrences allowed for inloop reductions");
8726       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8727         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8728     }
8729   }
8730 
8731   // For each interleave group which is relevant for this (possibly trimmed)
8732   // Range, add it to the set of groups to be later applied to the VPlan and add
8733   // placeholders for its members' Recipes which we'll be replacing with a
8734   // single VPInterleaveRecipe.
8735   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8736     auto applyIG = [IG, this](ElementCount VF) -> bool {
8737       return (VF.isVector() && // Query is illegal for VF == 1
8738               CM.getWideningDecision(IG->getInsertPos(), VF) ==
8739                   LoopVectorizationCostModel::CM_Interleave);
8740     };
8741     if (!getDecisionAndClampRange(applyIG, Range))
8742       continue;
8743     InterleaveGroups.insert(IG);
8744     for (unsigned i = 0; i < IG->getFactor(); i++)
8745       if (Instruction *Member = IG->getMember(i))
8746         RecipeBuilder.recordRecipeOf(Member);
8747   };
8748 
8749   // ---------------------------------------------------------------------------
8750   // Build initial VPlan: Scan the body of the loop in a topological order to
8751   // visit each basic block after having visited its predecessor basic blocks.
8752   // ---------------------------------------------------------------------------
8753 
8754   // Create initial VPlan skeleton, with separate header and latch blocks.
8755   VPBasicBlock *HeaderVPBB = new VPBasicBlock();
8756   VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
8757   VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
8758   auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
8759   auto Plan = std::make_unique<VPlan>(TopRegion);
8760 
8761   Instruction *DLInst =
8762       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
8763   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
8764                         DLInst ? DLInst->getDebugLoc() : DebugLoc(),
8765                         !CM.foldTailByMasking(), false);
8766 
8767   // Scan the body of the loop in a topological order to visit each basic block
8768   // after having visited its predecessor basic blocks.
8769   LoopBlocksDFS DFS(OrigLoop);
8770   DFS.perform(LI);
8771 
8772   VPBasicBlock *VPBB = HeaderVPBB;
8773   SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
8774   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
8775     // Relevant instructions from basic block BB will be grouped into VPRecipe
8776     // ingredients and fill a new VPBasicBlock.
8777     unsigned VPBBsForBB = 0;
8778     VPBB->setName(BB->getName());
8779     Builder.setInsertPoint(VPBB);
8780 
8781     // Introduce each ingredient into VPlan.
8782     // TODO: Model and preserve debug instrinsics in VPlan.
8783     for (Instruction &I : BB->instructionsWithoutDebug()) {
8784       Instruction *Instr = &I;
8785 
8786       // First filter out irrelevant instructions, to ensure no recipes are
8787       // built for them.
8788       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
8789         continue;
8790 
8791       SmallVector<VPValue *, 4> Operands;
8792       auto *Phi = dyn_cast<PHINode>(Instr);
8793       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
8794         Operands.push_back(Plan->getOrAddVPValue(
8795             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
8796       } else {
8797         auto OpRange = Plan->mapToVPValues(Instr->operands());
8798         Operands = {OpRange.begin(), OpRange.end()};
8799       }
8800       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
8801               Instr, Operands, Range, Plan)) {
8802         // If Instr can be simplified to an existing VPValue, use it.
8803         if (RecipeOrValue.is<VPValue *>()) {
8804           auto *VPV = RecipeOrValue.get<VPValue *>();
8805           Plan->addVPValue(Instr, VPV);
8806           // If the re-used value is a recipe, register the recipe for the
8807           // instruction, in case the recipe for Instr needs to be recorded.
8808           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
8809             RecipeBuilder.setRecipe(Instr, R);
8810           continue;
8811         }
8812         // Otherwise, add the new recipe.
8813         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
8814         for (auto *Def : Recipe->definedValues()) {
8815           auto *UV = Def->getUnderlyingValue();
8816           Plan->addVPValue(UV, Def);
8817         }
8818 
8819         if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) &&
8820             HeaderVPBB->getFirstNonPhi() != VPBB->end()) {
8821           // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section
8822           // of the header block. That can happen for truncates of induction
8823           // variables. Those recipes are moved to the phi section of the header
8824           // block after applying SinkAfter, which relies on the original
8825           // position of the trunc.
8826           assert(isa<TruncInst>(Instr));
8827           InductionsToMove.push_back(
8828               cast<VPWidenIntOrFpInductionRecipe>(Recipe));
8829         }
8830         RecipeBuilder.setRecipe(Instr, Recipe);
8831         VPBB->appendRecipe(Recipe);
8832         continue;
8833       }
8834 
8835       // Otherwise, if all widening options failed, Instruction is to be
8836       // replicated. This may create a successor for VPBB.
8837       VPBasicBlock *NextVPBB =
8838           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
8839       if (NextVPBB != VPBB) {
8840         VPBB = NextVPBB;
8841         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8842                                     : "");
8843       }
8844     }
8845 
8846     VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB);
8847     VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor());
8848   }
8849 
8850   HeaderVPBB->setName("vector.body");
8851 
8852   // Fold the last, empty block into its predecessor.
8853   VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB);
8854   assert(VPBB && "expected to fold last (empty) block");
8855   // After here, VPBB should not be used.
8856   VPBB = nullptr;
8857 
8858   assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) &&
8859          !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() &&
8860          "entry block must be set to a VPRegionBlock having a non-empty entry "
8861          "VPBasicBlock");
8862   RecipeBuilder.fixHeaderPhis();
8863 
8864   // ---------------------------------------------------------------------------
8865   // Transform initial VPlan: Apply previously taken decisions, in order, to
8866   // bring the VPlan to its final state.
8867   // ---------------------------------------------------------------------------
8868 
8869   // Apply Sink-After legal constraints.
8870   auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
8871     auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
8872     if (Region && Region->isReplicator()) {
8873       assert(Region->getNumSuccessors() == 1 &&
8874              Region->getNumPredecessors() == 1 && "Expected SESE region!");
8875       assert(R->getParent()->size() == 1 &&
8876              "A recipe in an original replicator region must be the only "
8877              "recipe in its block");
8878       return Region;
8879     }
8880     return nullptr;
8881   };
8882   for (auto &Entry : SinkAfter) {
8883     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
8884     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
8885 
8886     auto *TargetRegion = GetReplicateRegion(Target);
8887     auto *SinkRegion = GetReplicateRegion(Sink);
8888     if (!SinkRegion) {
8889       // If the sink source is not a replicate region, sink the recipe directly.
8890       if (TargetRegion) {
8891         // The target is in a replication region, make sure to move Sink to
8892         // the block after it, not into the replication region itself.
8893         VPBasicBlock *NextBlock =
8894             cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
8895         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
8896       } else
8897         Sink->moveAfter(Target);
8898       continue;
8899     }
8900 
8901     // The sink source is in a replicate region. Unhook the region from the CFG.
8902     auto *SinkPred = SinkRegion->getSinglePredecessor();
8903     auto *SinkSucc = SinkRegion->getSingleSuccessor();
8904     VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
8905     VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
8906     VPBlockUtils::connectBlocks(SinkPred, SinkSucc);
8907 
8908     if (TargetRegion) {
8909       // The target recipe is also in a replicate region, move the sink region
8910       // after the target region.
8911       auto *TargetSucc = TargetRegion->getSingleSuccessor();
8912       VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
8913       VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
8914       VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
8915     } else {
8916       // The sink source is in a replicate region, we need to move the whole
8917       // replicate region, which should only contain a single recipe in the
8918       // main block.
8919       auto *SplitBlock =
8920           Target->getParent()->splitAt(std::next(Target->getIterator()));
8921 
8922       auto *SplitPred = SplitBlock->getSinglePredecessor();
8923 
8924       VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
8925       VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
8926       VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
8927     }
8928   }
8929 
8930   VPlanTransforms::removeRedundantCanonicalIVs(*Plan);
8931   VPlanTransforms::removeRedundantInductionCasts(*Plan);
8932 
8933   // Now that sink-after is done, move induction recipes for optimized truncates
8934   // to the phi section of the header block.
8935   for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove)
8936     Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
8937 
8938   // Adjust the recipes for any inloop reductions.
8939   adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan,
8940                              RecipeBuilder, Range.Start);
8941 
8942   // Introduce a recipe to combine the incoming and previous values of a
8943   // first-order recurrence.
8944   for (VPRecipeBase &R :
8945        Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
8946     auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R);
8947     if (!RecurPhi)
8948       continue;
8949 
8950     VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe();
8951     VPBasicBlock *InsertBlock = PrevRecipe->getParent();
8952     auto *Region = GetReplicateRegion(PrevRecipe);
8953     if (Region)
8954       InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor());
8955     if (Region || PrevRecipe->isPhi())
8956       Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
8957     else
8958       Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator()));
8959 
8960     auto *RecurSplice = cast<VPInstruction>(
8961         Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
8962                              {RecurPhi, RecurPhi->getBackedgeValue()}));
8963 
8964     RecurPhi->replaceAllUsesWith(RecurSplice);
8965     // Set the first operand of RecurSplice to RecurPhi again, after replacing
8966     // all users.
8967     RecurSplice->setOperand(0, RecurPhi);
8968   }
8969 
8970   // Interleave memory: for each Interleave Group we marked earlier as relevant
8971   // for this VPlan, replace the Recipes widening its memory instructions with a
8972   // single VPInterleaveRecipe at its insertion point.
8973   for (auto IG : InterleaveGroups) {
8974     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
8975         RecipeBuilder.getRecipe(IG->getInsertPos()));
8976     SmallVector<VPValue *, 4> StoredValues;
8977     for (unsigned i = 0; i < IG->getFactor(); ++i)
8978       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
8979         auto *StoreR =
8980             cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
8981         StoredValues.push_back(StoreR->getStoredValue());
8982       }
8983 
8984     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
8985                                         Recipe->getMask());
8986     VPIG->insertBefore(Recipe);
8987     unsigned J = 0;
8988     for (unsigned i = 0; i < IG->getFactor(); ++i)
8989       if (Instruction *Member = IG->getMember(i)) {
8990         if (!Member->getType()->isVoidTy()) {
8991           VPValue *OriginalV = Plan->getVPValue(Member);
8992           Plan->removeVPValueFor(Member);
8993           Plan->addVPValue(Member, VPIG->getVPValue(J));
8994           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
8995           J++;
8996         }
8997         RecipeBuilder.getRecipe(Member)->eraseFromParent();
8998       }
8999   }
9000 
9001   // From this point onwards, VPlan-to-VPlan transformations may change the plan
9002   // in ways that accessing values using original IR values is incorrect.
9003   Plan->disableValue2VPValue();
9004 
9005   VPlanTransforms::optimizeInductions(*Plan, *PSE.getSE());
9006   VPlanTransforms::sinkScalarOperands(*Plan);
9007   VPlanTransforms::mergeReplicateRegions(*Plan);
9008   VPlanTransforms::removeDeadRecipes(*Plan, *OrigLoop);
9009 
9010   std::string PlanName;
9011   raw_string_ostream RSO(PlanName);
9012   ElementCount VF = Range.Start;
9013   Plan->addVF(VF);
9014   RSO << "Initial VPlan for VF={" << VF;
9015   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9016     Plan->addVF(VF);
9017     RSO << "," << VF;
9018   }
9019   RSO << "},UF>=1";
9020   RSO.flush();
9021   Plan->setName(PlanName);
9022 
9023   // Fold Exit block into its predecessor if possible.
9024   // TODO: Fold block earlier once all VPlan transforms properly maintain a
9025   // VPBasicBlock as exit.
9026   VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit());
9027 
9028   assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
9029   return Plan;
9030 }
9031 
9032 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
9033   // Outer loop handling: They may require CFG and instruction level
9034   // transformations before even evaluating whether vectorization is profitable.
9035   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9036   // the vectorization pipeline.
9037   assert(!OrigLoop->isInnermost());
9038   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9039 
9040   // Create new empty VPlan
9041   auto Plan = std::make_unique<VPlan>();
9042 
9043   // Build hierarchical CFG
9044   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9045   HCFGBuilder.buildHierarchicalCFG();
9046 
9047   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9048        VF *= 2)
9049     Plan->addVF(VF);
9050 
9051   if (EnableVPlanPredication) {
9052     VPlanPredicator VPP(*Plan);
9053     VPP.predicate();
9054 
9055     // Avoid running transformation to recipes until masked code generation in
9056     // VPlan-native path is in place.
9057     return Plan;
9058   }
9059 
9060   SmallPtrSet<Instruction *, 1> DeadInstructions;
9061   VPlanTransforms::VPInstructionsToVPRecipes(
9062       OrigLoop, Plan,
9063       [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
9064       DeadInstructions, *PSE.getSE());
9065 
9066   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
9067                         true, true);
9068   return Plan;
9069 }
9070 
9071 // Adjust the recipes for reductions. For in-loop reductions the chain of
9072 // instructions leading from the loop exit instr to the phi need to be converted
9073 // to reductions, with one operand being vector and the other being the scalar
9074 // reduction chain. For other reductions, a select is introduced between the phi
9075 // and live-out recipes when folding the tail.
9076 void LoopVectorizationPlanner::adjustRecipesForReductions(
9077     VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9078     ElementCount MinVF) {
9079   for (auto &Reduction : CM.getInLoopReductionChains()) {
9080     PHINode *Phi = Reduction.first;
9081     const RecurrenceDescriptor &RdxDesc =
9082         Legal->getReductionVars().find(Phi)->second;
9083     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9084 
9085     if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9086       continue;
9087 
9088     // ReductionOperations are orders top-down from the phi's use to the
9089     // LoopExitValue. We keep a track of the previous item (the Chain) to tell
9090     // which of the two operands will remain scalar and which will be reduced.
9091     // For minmax the chain will be the select instructions.
9092     Instruction *Chain = Phi;
9093     for (Instruction *R : ReductionOperations) {
9094       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9095       RecurKind Kind = RdxDesc.getRecurrenceKind();
9096 
9097       VPValue *ChainOp = Plan->getVPValue(Chain);
9098       unsigned FirstOpId;
9099       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9100              "Only min/max recurrences allowed for inloop reductions");
9101       // Recognize a call to the llvm.fmuladd intrinsic.
9102       bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
9103       assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
9104              "Expected instruction to be a call to the llvm.fmuladd intrinsic");
9105       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9106         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9107                "Expected to replace a VPWidenSelectSC");
9108         FirstOpId = 1;
9109       } else {
9110         assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
9111                 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
9112                "Expected to replace a VPWidenSC");
9113         FirstOpId = 0;
9114       }
9115       unsigned VecOpId =
9116           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9117       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9118 
9119       auto *CondOp = CM.blockNeedsPredicationForAnyReason(R->getParent())
9120                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9121                          : nullptr;
9122 
9123       if (IsFMulAdd) {
9124         // If the instruction is a call to the llvm.fmuladd intrinsic then we
9125         // need to create an fmul recipe to use as the vector operand for the
9126         // fadd reduction.
9127         VPInstruction *FMulRecipe = new VPInstruction(
9128             Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))});
9129         FMulRecipe->setFastMathFlags(R->getFastMathFlags());
9130         WidenRecipe->getParent()->insert(FMulRecipe,
9131                                          WidenRecipe->getIterator());
9132         VecOp = FMulRecipe;
9133       }
9134       VPReductionRecipe *RedRecipe =
9135           new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9136       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9137       Plan->removeVPValueFor(R);
9138       Plan->addVPValue(R, RedRecipe);
9139       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9140       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9141       WidenRecipe->eraseFromParent();
9142 
9143       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9144         VPRecipeBase *CompareRecipe =
9145             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9146         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9147                "Expected to replace a VPWidenSC");
9148         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9149                "Expected no remaining users");
9150         CompareRecipe->eraseFromParent();
9151       }
9152       Chain = R;
9153     }
9154   }
9155 
9156   // If tail is folded by masking, introduce selects between the phi
9157   // and the live-out instruction of each reduction, at the beginning of the
9158   // dedicated latch block.
9159   if (CM.foldTailByMasking()) {
9160     Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin());
9161     for (VPRecipeBase &R :
9162          Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
9163       VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9164       if (!PhiR || PhiR->isInLoop())
9165         continue;
9166       VPValue *Cond =
9167           RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9168       VPValue *Red = PhiR->getBackedgeValue();
9169       assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB &&
9170              "reduction recipe must be defined before latch");
9171       Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
9172     }
9173   }
9174 }
9175 
9176 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9177 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9178                                VPSlotTracker &SlotTracker) const {
9179   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9180   IG->getInsertPos()->printAsOperand(O, false);
9181   O << ", ";
9182   getAddr()->printAsOperand(O, SlotTracker);
9183   VPValue *Mask = getMask();
9184   if (Mask) {
9185     O << ", ";
9186     Mask->printAsOperand(O, SlotTracker);
9187   }
9188 
9189   unsigned OpIdx = 0;
9190   for (unsigned i = 0; i < IG->getFactor(); ++i) {
9191     if (!IG->getMember(i))
9192       continue;
9193     if (getNumStoreOperands() > 0) {
9194       O << "\n" << Indent << "  store ";
9195       getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
9196       O << " to index " << i;
9197     } else {
9198       O << "\n" << Indent << "  ";
9199       getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
9200       O << " = load from index " << i;
9201     }
9202     ++OpIdx;
9203   }
9204 }
9205 #endif
9206 
9207 void VPWidenCallRecipe::execute(VPTransformState &State) {
9208   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9209                                   *this, State);
9210 }
9211 
9212 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9213   auto &I = *cast<SelectInst>(getUnderlyingInstr());
9214   State.ILV->setDebugLocFromInst(&I);
9215 
9216   // The condition can be loop invariant  but still defined inside the
9217   // loop. This means that we can't just use the original 'cond' value.
9218   // We have to take the 'vectorized' value and pick the first lane.
9219   // Instcombine will make this a no-op.
9220   auto *InvarCond =
9221       InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr;
9222 
9223   for (unsigned Part = 0; Part < State.UF; ++Part) {
9224     Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part);
9225     Value *Op0 = State.get(getOperand(1), Part);
9226     Value *Op1 = State.get(getOperand(2), Part);
9227     Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
9228     State.set(this, Sel, Part);
9229     State.ILV->addMetadata(Sel, &I);
9230   }
9231 }
9232 
9233 void VPWidenRecipe::execute(VPTransformState &State) {
9234   auto &I = *cast<Instruction>(getUnderlyingValue());
9235   auto &Builder = State.Builder;
9236   switch (I.getOpcode()) {
9237   case Instruction::Call:
9238   case Instruction::Br:
9239   case Instruction::PHI:
9240   case Instruction::GetElementPtr:
9241   case Instruction::Select:
9242     llvm_unreachable("This instruction is handled by a different recipe.");
9243   case Instruction::UDiv:
9244   case Instruction::SDiv:
9245   case Instruction::SRem:
9246   case Instruction::URem:
9247   case Instruction::Add:
9248   case Instruction::FAdd:
9249   case Instruction::Sub:
9250   case Instruction::FSub:
9251   case Instruction::FNeg:
9252   case Instruction::Mul:
9253   case Instruction::FMul:
9254   case Instruction::FDiv:
9255   case Instruction::FRem:
9256   case Instruction::Shl:
9257   case Instruction::LShr:
9258   case Instruction::AShr:
9259   case Instruction::And:
9260   case Instruction::Or:
9261   case Instruction::Xor: {
9262     // Just widen unops and binops.
9263     State.ILV->setDebugLocFromInst(&I);
9264 
9265     for (unsigned Part = 0; Part < State.UF; ++Part) {
9266       SmallVector<Value *, 2> Ops;
9267       for (VPValue *VPOp : operands())
9268         Ops.push_back(State.get(VPOp, Part));
9269 
9270       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
9271 
9272       if (auto *VecOp = dyn_cast<Instruction>(V)) {
9273         VecOp->copyIRFlags(&I);
9274 
9275         // If the instruction is vectorized and was in a basic block that needed
9276         // predication, we can't propagate poison-generating flags (nuw/nsw,
9277         // exact, etc.). The control flow has been linearized and the
9278         // instruction is no longer guarded by the predicate, which could make
9279         // the flag properties to no longer hold.
9280         if (State.MayGeneratePoisonRecipes.contains(this))
9281           VecOp->dropPoisonGeneratingFlags();
9282       }
9283 
9284       // Use this vector value for all users of the original instruction.
9285       State.set(this, V, Part);
9286       State.ILV->addMetadata(V, &I);
9287     }
9288 
9289     break;
9290   }
9291   case Instruction::ICmp:
9292   case Instruction::FCmp: {
9293     // Widen compares. Generate vector compares.
9294     bool FCmp = (I.getOpcode() == Instruction::FCmp);
9295     auto *Cmp = cast<CmpInst>(&I);
9296     State.ILV->setDebugLocFromInst(Cmp);
9297     for (unsigned Part = 0; Part < State.UF; ++Part) {
9298       Value *A = State.get(getOperand(0), Part);
9299       Value *B = State.get(getOperand(1), Part);
9300       Value *C = nullptr;
9301       if (FCmp) {
9302         // Propagate fast math flags.
9303         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9304         Builder.setFastMathFlags(Cmp->getFastMathFlags());
9305         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
9306       } else {
9307         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
9308       }
9309       State.set(this, C, Part);
9310       State.ILV->addMetadata(C, &I);
9311     }
9312 
9313     break;
9314   }
9315 
9316   case Instruction::ZExt:
9317   case Instruction::SExt:
9318   case Instruction::FPToUI:
9319   case Instruction::FPToSI:
9320   case Instruction::FPExt:
9321   case Instruction::PtrToInt:
9322   case Instruction::IntToPtr:
9323   case Instruction::SIToFP:
9324   case Instruction::UIToFP:
9325   case Instruction::Trunc:
9326   case Instruction::FPTrunc:
9327   case Instruction::BitCast: {
9328     auto *CI = cast<CastInst>(&I);
9329     State.ILV->setDebugLocFromInst(CI);
9330 
9331     /// Vectorize casts.
9332     Type *DestTy = (State.VF.isScalar())
9333                        ? CI->getType()
9334                        : VectorType::get(CI->getType(), State.VF);
9335 
9336     for (unsigned Part = 0; Part < State.UF; ++Part) {
9337       Value *A = State.get(getOperand(0), Part);
9338       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
9339       State.set(this, Cast, Part);
9340       State.ILV->addMetadata(Cast, &I);
9341     }
9342     break;
9343   }
9344   default:
9345     // This instruction is not vectorized by simple widening.
9346     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
9347     llvm_unreachable("Unhandled instruction!");
9348   } // end of switch.
9349 }
9350 
9351 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9352   auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
9353   // Construct a vector GEP by widening the operands of the scalar GEP as
9354   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
9355   // results in a vector of pointers when at least one operand of the GEP
9356   // is vector-typed. Thus, to keep the representation compact, we only use
9357   // vector-typed operands for loop-varying values.
9358 
9359   if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
9360     // If we are vectorizing, but the GEP has only loop-invariant operands,
9361     // the GEP we build (by only using vector-typed operands for
9362     // loop-varying values) would be a scalar pointer. Thus, to ensure we
9363     // produce a vector of pointers, we need to either arbitrarily pick an
9364     // operand to broadcast, or broadcast a clone of the original GEP.
9365     // Here, we broadcast a clone of the original.
9366     //
9367     // TODO: If at some point we decide to scalarize instructions having
9368     //       loop-invariant operands, this special case will no longer be
9369     //       required. We would add the scalarization decision to
9370     //       collectLoopScalars() and teach getVectorValue() to broadcast
9371     //       the lane-zero scalar value.
9372     auto *Clone = State.Builder.Insert(GEP->clone());
9373     for (unsigned Part = 0; Part < State.UF; ++Part) {
9374       Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone);
9375       State.set(this, EntryPart, Part);
9376       State.ILV->addMetadata(EntryPart, GEP);
9377     }
9378   } else {
9379     // If the GEP has at least one loop-varying operand, we are sure to
9380     // produce a vector of pointers. But if we are only unrolling, we want
9381     // to produce a scalar GEP for each unroll part. Thus, the GEP we
9382     // produce with the code below will be scalar (if VF == 1) or vector
9383     // (otherwise). Note that for the unroll-only case, we still maintain
9384     // values in the vector mapping with initVector, as we do for other
9385     // instructions.
9386     for (unsigned Part = 0; Part < State.UF; ++Part) {
9387       // The pointer operand of the new GEP. If it's loop-invariant, we
9388       // won't broadcast it.
9389       auto *Ptr = IsPtrLoopInvariant
9390                       ? State.get(getOperand(0), VPIteration(0, 0))
9391                       : State.get(getOperand(0), Part);
9392 
9393       // Collect all the indices for the new GEP. If any index is
9394       // loop-invariant, we won't broadcast it.
9395       SmallVector<Value *, 4> Indices;
9396       for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
9397         VPValue *Operand = getOperand(I);
9398         if (IsIndexLoopInvariant[I - 1])
9399           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
9400         else
9401           Indices.push_back(State.get(Operand, Part));
9402       }
9403 
9404       // If the GEP instruction is vectorized and was in a basic block that
9405       // needed predication, we can't propagate the poison-generating 'inbounds'
9406       // flag. The control flow has been linearized and the GEP is no longer
9407       // guarded by the predicate, which could make the 'inbounds' properties to
9408       // no longer hold.
9409       bool IsInBounds =
9410           GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0;
9411 
9412       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
9413       // but it should be a vector, otherwise.
9414       auto *NewGEP = IsInBounds
9415                          ? State.Builder.CreateInBoundsGEP(
9416                                GEP->getSourceElementType(), Ptr, Indices)
9417                          : State.Builder.CreateGEP(GEP->getSourceElementType(),
9418                                                    Ptr, Indices);
9419       assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
9420              "NewGEP is not a pointer vector");
9421       State.set(this, NewGEP, Part);
9422       State.ILV->addMetadata(NewGEP, GEP);
9423     }
9424   }
9425 }
9426 
9427 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9428   assert(!State.Instance && "Int or FP induction being replicated.");
9429 
9430   Value *Start = getStartValue()->getLiveInIRValue();
9431   const InductionDescriptor &ID = getInductionDescriptor();
9432   TruncInst *Trunc = getTruncInst();
9433   IRBuilderBase &Builder = State.Builder;
9434   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
9435   assert(State.VF.isVector() && "must have vector VF");
9436 
9437   // The value from the original loop to which we are mapping the new induction
9438   // variable.
9439   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
9440 
9441   auto &DL = EntryVal->getModule()->getDataLayout();
9442 
9443   // Generate code for the induction step. Note that induction steps are
9444   // required to be loop-invariant
9445   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
9446     if (SE.isSCEVable(IV->getType())) {
9447       SCEVExpander Exp(SE, DL, "induction");
9448       return Exp.expandCodeFor(Step, Step->getType(),
9449                                State.CFG.VectorPreHeader->getTerminator());
9450     }
9451     return cast<SCEVUnknown>(Step)->getValue();
9452   };
9453 
9454   // Fast-math-flags propagate from the original induction instruction.
9455   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9456   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
9457     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
9458 
9459   // Now do the actual transformations, and start with creating the step value.
9460   Value *Step = CreateStepValue(ID.getStep());
9461 
9462   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
9463          "Expected either an induction phi-node or a truncate of it!");
9464 
9465   // Construct the initial value of the vector IV in the vector loop preheader
9466   auto CurrIP = Builder.saveIP();
9467   Builder.SetInsertPoint(State.CFG.VectorPreHeader->getTerminator());
9468   if (isa<TruncInst>(EntryVal)) {
9469     assert(Start->getType()->isIntegerTy() &&
9470            "Truncation requires an integer type");
9471     auto *TruncType = cast<IntegerType>(EntryVal->getType());
9472     Step = Builder.CreateTrunc(Step, TruncType);
9473     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
9474   }
9475 
9476   Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
9477   Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
9478   Value *SteppedStart = getStepVector(
9479       SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder);
9480 
9481   // We create vector phi nodes for both integer and floating-point induction
9482   // variables. Here, we determine the kind of arithmetic we will perform.
9483   Instruction::BinaryOps AddOp;
9484   Instruction::BinaryOps MulOp;
9485   if (Step->getType()->isIntegerTy()) {
9486     AddOp = Instruction::Add;
9487     MulOp = Instruction::Mul;
9488   } else {
9489     AddOp = ID.getInductionOpcode();
9490     MulOp = Instruction::FMul;
9491   }
9492 
9493   // Multiply the vectorization factor by the step using integer or
9494   // floating-point arithmetic as appropriate.
9495   Type *StepType = Step->getType();
9496   Value *RuntimeVF;
9497   if (Step->getType()->isFloatingPointTy())
9498     RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF);
9499   else
9500     RuntimeVF = getRuntimeVF(Builder, StepType, State.VF);
9501   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
9502 
9503   // Create a vector splat to use in the induction update.
9504   //
9505   // FIXME: If the step is non-constant, we create the vector splat with
9506   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
9507   //        handle a constant vector splat.
9508   Value *SplatVF = isa<Constant>(Mul)
9509                        ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
9510                        : Builder.CreateVectorSplat(State.VF, Mul);
9511   Builder.restoreIP(CurrIP);
9512 
9513   // We may need to add the step a number of times, depending on the unroll
9514   // factor. The last of those goes into the PHI.
9515   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
9516                                     &*State.CFG.PrevBB->getFirstInsertionPt());
9517   VecInd->setDebugLoc(EntryVal->getDebugLoc());
9518   Instruction *LastInduction = VecInd;
9519   for (unsigned Part = 0; Part < State.UF; ++Part) {
9520     State.set(this, LastInduction, Part);
9521 
9522     if (isa<TruncInst>(EntryVal))
9523       State.ILV->addMetadata(LastInduction, EntryVal);
9524 
9525     LastInduction = cast<Instruction>(
9526         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
9527     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
9528   }
9529 
9530   LastInduction->setName("vec.ind.next");
9531   VecInd->addIncoming(SteppedStart, State.CFG.VectorPreHeader);
9532   // Add induction update using an incorrect block temporarily. The phi node
9533   // will be fixed after VPlan execution. Note that at this point the latch
9534   // block cannot be used, as it does not exist yet.
9535   // TODO: Model increment value in VPlan, by turning the recipe into a
9536   // multi-def and a subclass of VPHeaderPHIRecipe.
9537   VecInd->addIncoming(LastInduction, State.CFG.VectorPreHeader);
9538 }
9539 
9540 void VPWidenPointerInductionRecipe::execute(VPTransformState &State) {
9541   assert(IndDesc.getKind() == InductionDescriptor::IK_PtrInduction &&
9542          "Not a pointer induction according to InductionDescriptor!");
9543   assert(cast<PHINode>(getUnderlyingInstr())->getType()->isPointerTy() &&
9544          "Unexpected type.");
9545 
9546   auto *IVR = getParent()->getPlan()->getCanonicalIV();
9547   PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0));
9548 
9549   if (all_of(users(), [this](const VPUser *U) {
9550         return cast<VPRecipeBase>(U)->usesScalars(this);
9551       })) {
9552     // This is the normalized GEP that starts counting at zero.
9553     Value *PtrInd = State.Builder.CreateSExtOrTrunc(
9554         CanonicalIV, IndDesc.getStep()->getType());
9555     // Determine the number of scalars we need to generate for each unroll
9556     // iteration. If the instruction is uniform, we only need to generate the
9557     // first lane. Otherwise, we generate all VF values.
9558     bool IsUniform = vputils::onlyFirstLaneUsed(this);
9559     assert((IsUniform || !State.VF.isScalable()) &&
9560            "Cannot scalarize a scalable VF");
9561     unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();
9562 
9563     for (unsigned Part = 0; Part < State.UF; ++Part) {
9564       Value *PartStart =
9565           createStepForVF(State.Builder, PtrInd->getType(), State.VF, Part);
9566 
9567       for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
9568         Value *Idx = State.Builder.CreateAdd(
9569             PartStart, ConstantInt::get(PtrInd->getType(), Lane));
9570         Value *GlobalIdx = State.Builder.CreateAdd(PtrInd, Idx);
9571 
9572         Value *Step = CreateStepValue(IndDesc.getStep(), SE,
9573                                       State.CFG.PrevBB->getTerminator());
9574         Value *SclrGep = emitTransformedIndex(
9575             State.Builder, GlobalIdx, IndDesc.getStartValue(), Step, IndDesc);
9576         SclrGep->setName("next.gep");
9577         State.set(this, SclrGep, VPIteration(Part, Lane));
9578       }
9579     }
9580     return;
9581   }
9582 
9583   assert(isa<SCEVConstant>(IndDesc.getStep()) &&
9584          "Induction step not a SCEV constant!");
9585   Type *PhiType = IndDesc.getStep()->getType();
9586 
9587   // Build a pointer phi
9588   Value *ScalarStartValue = getStartValue()->getLiveInIRValue();
9589   Type *ScStValueType = ScalarStartValue->getType();
9590   PHINode *NewPointerPhi =
9591       PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV);
9592   NewPointerPhi->addIncoming(ScalarStartValue, State.CFG.VectorPreHeader);
9593 
9594   // A pointer induction, performed by using a gep
9595   const DataLayout &DL = NewPointerPhi->getModule()->getDataLayout();
9596   Instruction *InductionLoc = &*State.Builder.GetInsertPoint();
9597 
9598   const SCEV *ScalarStep = IndDesc.getStep();
9599   SCEVExpander Exp(SE, DL, "induction");
9600   Value *ScalarStepValue = Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
9601   Value *RuntimeVF = getRuntimeVF(State.Builder, PhiType, State.VF);
9602   Value *NumUnrolledElems =
9603       State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
9604   Value *InductionGEP = GetElementPtrInst::Create(
9605       IndDesc.getElementType(), NewPointerPhi,
9606       State.Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
9607       InductionLoc);
9608   // Add induction update using an incorrect block temporarily. The phi node
9609   // will be fixed after VPlan execution. Note that at this point the latch
9610   // block cannot be used, as it does not exist yet.
9611   // TODO: Model increment value in VPlan, by turning the recipe into a
9612   // multi-def and a subclass of VPHeaderPHIRecipe.
9613   NewPointerPhi->addIncoming(InductionGEP, State.CFG.VectorPreHeader);
9614 
9615   // Create UF many actual address geps that use the pointer
9616   // phi as base and a vectorized version of the step value
9617   // (<step*0, ..., step*N>) as offset.
9618   for (unsigned Part = 0; Part < State.UF; ++Part) {
9619     Type *VecPhiType = VectorType::get(PhiType, State.VF);
9620     Value *StartOffsetScalar =
9621         State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
9622     Value *StartOffset =
9623         State.Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
9624     // Create a vector of consecutive numbers from zero to VF.
9625     StartOffset = State.Builder.CreateAdd(
9626         StartOffset, State.Builder.CreateStepVector(VecPhiType));
9627 
9628     Value *GEP = State.Builder.CreateGEP(
9629         IndDesc.getElementType(), NewPointerPhi,
9630         State.Builder.CreateMul(
9631             StartOffset,
9632             State.Builder.CreateVectorSplat(State.VF, ScalarStepValue),
9633             "vector.gep"));
9634     State.set(this, GEP, Part);
9635   }
9636 }
9637 
9638 void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
9639   assert(!State.Instance && "VPScalarIVStepsRecipe being replicated.");
9640 
9641   // Fast-math-flags propagate from the original induction instruction.
9642   IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
9643   if (IndDesc.getInductionBinOp() &&
9644       isa<FPMathOperator>(IndDesc.getInductionBinOp()))
9645     State.Builder.setFastMathFlags(
9646         IndDesc.getInductionBinOp()->getFastMathFlags());
9647 
9648   Value *Step = State.get(getStepValue(), VPIteration(0, 0));
9649   auto CreateScalarIV = [&](Value *&Step) -> Value * {
9650     Value *ScalarIV = State.get(getCanonicalIV(), VPIteration(0, 0));
9651     auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0);
9652     if (!isCanonical() || CanonicalIV->getType() != Ty) {
9653       ScalarIV =
9654           Ty->isIntegerTy()
9655               ? State.Builder.CreateSExtOrTrunc(ScalarIV, Ty)
9656               : State.Builder.CreateCast(Instruction::SIToFP, ScalarIV, Ty);
9657       ScalarIV = emitTransformedIndex(State.Builder, ScalarIV,
9658                                       getStartValue()->getLiveInIRValue(), Step,
9659                                       IndDesc);
9660       ScalarIV->setName("offset.idx");
9661     }
9662     if (TruncToTy) {
9663       assert(Step->getType()->isIntegerTy() &&
9664              "Truncation requires an integer step");
9665       ScalarIV = State.Builder.CreateTrunc(ScalarIV, TruncToTy);
9666       Step = State.Builder.CreateTrunc(Step, TruncToTy);
9667     }
9668     return ScalarIV;
9669   };
9670 
9671   Value *ScalarIV = CreateScalarIV(Step);
9672   if (State.VF.isVector()) {
9673     buildScalarSteps(ScalarIV, Step, IndDesc, this, State);
9674     return;
9675   }
9676 
9677   for (unsigned Part = 0; Part < State.UF; ++Part) {
9678     assert(!State.VF.isScalable() && "scalable vectors not yet supported.");
9679     Value *EntryPart;
9680     if (Step->getType()->isFloatingPointTy()) {
9681       Value *StartIdx =
9682           getRuntimeVFAsFloat(State.Builder, Step->getType(), State.VF * Part);
9683       // Floating-point operations inherit FMF via the builder's flags.
9684       Value *MulOp = State.Builder.CreateFMul(StartIdx, Step);
9685       EntryPart = State.Builder.CreateBinOp(IndDesc.getInductionOpcode(),
9686                                             ScalarIV, MulOp);
9687     } else {
9688       Value *StartIdx =
9689           getRuntimeVF(State.Builder, Step->getType(), State.VF * Part);
9690       EntryPart = State.Builder.CreateAdd(
9691           ScalarIV, State.Builder.CreateMul(StartIdx, Step), "induction");
9692     }
9693     State.set(this, EntryPart, Part);
9694   }
9695 }
9696 
9697 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9698   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
9699                                  State);
9700 }
9701 
9702 void VPBlendRecipe::execute(VPTransformState &State) {
9703   State.ILV->setDebugLocFromInst(Phi, &State.Builder);
9704   // We know that all PHIs in non-header blocks are converted into
9705   // selects, so we don't have to worry about the insertion order and we
9706   // can just use the builder.
9707   // At this point we generate the predication tree. There may be
9708   // duplications since this is a simple recursive scan, but future
9709   // optimizations will clean it up.
9710 
9711   unsigned NumIncoming = getNumIncomingValues();
9712 
9713   // Generate a sequence of selects of the form:
9714   // SELECT(Mask3, In3,
9715   //        SELECT(Mask2, In2,
9716   //               SELECT(Mask1, In1,
9717   //                      In0)))
9718   // Note that Mask0 is never used: lanes for which no path reaches this phi and
9719   // are essentially undef are taken from In0.
9720   InnerLoopVectorizer::VectorParts Entry(State.UF);
9721   for (unsigned In = 0; In < NumIncoming; ++In) {
9722     for (unsigned Part = 0; Part < State.UF; ++Part) {
9723       // We might have single edge PHIs (blocks) - use an identity
9724       // 'select' for the first PHI operand.
9725       Value *In0 = State.get(getIncomingValue(In), Part);
9726       if (In == 0)
9727         Entry[Part] = In0; // Initialize with the first incoming value.
9728       else {
9729         // Select between the current value and the previous incoming edge
9730         // based on the incoming mask.
9731         Value *Cond = State.get(getMask(In), Part);
9732         Entry[Part] =
9733             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9734       }
9735     }
9736   }
9737   for (unsigned Part = 0; Part < State.UF; ++Part)
9738     State.set(this, Entry[Part], Part);
9739 }
9740 
9741 void VPInterleaveRecipe::execute(VPTransformState &State) {
9742   assert(!State.Instance && "Interleave group being replicated.");
9743   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9744                                       getStoredValues(), getMask());
9745 }
9746 
9747 void VPReductionRecipe::execute(VPTransformState &State) {
9748   assert(!State.Instance && "Reduction being replicated.");
9749   Value *PrevInChain = State.get(getChainOp(), 0);
9750   RecurKind Kind = RdxDesc->getRecurrenceKind();
9751   bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9752   // Propagate the fast-math flags carried by the underlying instruction.
9753   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
9754   State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
9755   for (unsigned Part = 0; Part < State.UF; ++Part) {
9756     Value *NewVecOp = State.get(getVecOp(), Part);
9757     if (VPValue *Cond = getCondOp()) {
9758       Value *NewCond = State.get(Cond, Part);
9759       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9760       Value *Iden = RdxDesc->getRecurrenceIdentity(
9761           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9762       Value *IdenVec =
9763           State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
9764       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9765       NewVecOp = Select;
9766     }
9767     Value *NewRed;
9768     Value *NextInChain;
9769     if (IsOrdered) {
9770       if (State.VF.isVector())
9771         NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9772                                         PrevInChain);
9773       else
9774         NewRed = State.Builder.CreateBinOp(
9775             (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
9776             NewVecOp);
9777       PrevInChain = NewRed;
9778     } else {
9779       PrevInChain = State.get(getChainOp(), Part);
9780       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9781     }
9782     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9783       NextInChain =
9784           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9785                          NewRed, PrevInChain);
9786     } else if (IsOrdered)
9787       NextInChain = NewRed;
9788     else
9789       NextInChain = State.Builder.CreateBinOp(
9790           (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
9791           PrevInChain);
9792     State.set(this, NextInChain, Part);
9793   }
9794 }
9795 
9796 void VPReplicateRecipe::execute(VPTransformState &State) {
9797   if (State.Instance) { // Generate a single instance.
9798     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9799     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
9800                                     IsPredicated, State);
9801     // Insert scalar instance packing it into a vector.
9802     if (AlsoPack && State.VF.isVector()) {
9803       // If we're constructing lane 0, initialize to start from poison.
9804       if (State.Instance->Lane.isFirstLane()) {
9805         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9806         Value *Poison = PoisonValue::get(
9807             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9808         State.set(this, Poison, State.Instance->Part);
9809       }
9810       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9811     }
9812     return;
9813   }
9814 
9815   // Generate scalar instances for all VF lanes of all UF parts, unless the
9816   // instruction is uniform inwhich case generate only the first lane for each
9817   // of the UF parts.
9818   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9819   assert((!State.VF.isScalable() || IsUniform) &&
9820          "Can't scalarize a scalable vector");
9821   for (unsigned Part = 0; Part < State.UF; ++Part)
9822     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9823       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this,
9824                                       VPIteration(Part, Lane), IsPredicated,
9825                                       State);
9826 }
9827 
9828 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9829   assert(State.Instance && "Branch on Mask works only on single instance.");
9830 
9831   unsigned Part = State.Instance->Part;
9832   unsigned Lane = State.Instance->Lane.getKnownLane();
9833 
9834   Value *ConditionBit = nullptr;
9835   VPValue *BlockInMask = getMask();
9836   if (BlockInMask) {
9837     ConditionBit = State.get(BlockInMask, Part);
9838     if (ConditionBit->getType()->isVectorTy())
9839       ConditionBit = State.Builder.CreateExtractElement(
9840           ConditionBit, State.Builder.getInt32(Lane));
9841   } else // Block in mask is all-one.
9842     ConditionBit = State.Builder.getTrue();
9843 
9844   // Replace the temporary unreachable terminator with a new conditional branch,
9845   // whose two destinations will be set later when they are created.
9846   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9847   assert(isa<UnreachableInst>(CurrentTerminator) &&
9848          "Expected to replace unreachable terminator with conditional branch.");
9849   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9850   CondBr->setSuccessor(0, nullptr);
9851   ReplaceInstWithInst(CurrentTerminator, CondBr);
9852 }
9853 
9854 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9855   assert(State.Instance && "Predicated instruction PHI works per instance.");
9856   Instruction *ScalarPredInst =
9857       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9858   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9859   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9860   assert(PredicatingBB && "Predicated block has no single predecessor.");
9861   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9862          "operand must be VPReplicateRecipe");
9863 
9864   // By current pack/unpack logic we need to generate only a single phi node: if
9865   // a vector value for the predicated instruction exists at this point it means
9866   // the instruction has vector users only, and a phi for the vector value is
9867   // needed. In this case the recipe of the predicated instruction is marked to
9868   // also do that packing, thereby "hoisting" the insert-element sequence.
9869   // Otherwise, a phi node for the scalar value is needed.
9870   unsigned Part = State.Instance->Part;
9871   if (State.hasVectorValue(getOperand(0), Part)) {
9872     Value *VectorValue = State.get(getOperand(0), Part);
9873     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9874     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9875     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9876     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9877     if (State.hasVectorValue(this, Part))
9878       State.reset(this, VPhi, Part);
9879     else
9880       State.set(this, VPhi, Part);
9881     // NOTE: Currently we need to update the value of the operand, so the next
9882     // predicated iteration inserts its generated value in the correct vector.
9883     State.reset(getOperand(0), VPhi, Part);
9884   } else {
9885     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9886     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9887     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9888                      PredicatingBB);
9889     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9890     if (State.hasScalarValue(this, *State.Instance))
9891       State.reset(this, Phi, *State.Instance);
9892     else
9893       State.set(this, Phi, *State.Instance);
9894     // NOTE: Currently we need to update the value of the operand, so the next
9895     // predicated iteration inserts its generated value in the correct vector.
9896     State.reset(getOperand(0), Phi, *State.Instance);
9897   }
9898 }
9899 
9900 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9901   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9902 
9903   // Attempt to issue a wide load.
9904   LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
9905   StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);
9906 
9907   assert((LI || SI) && "Invalid Load/Store instruction");
9908   assert((!SI || StoredValue) && "No stored value provided for widened store");
9909   assert((!LI || !StoredValue) && "Stored value provided for widened load");
9910 
9911   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
9912 
9913   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
9914   const Align Alignment = getLoadStoreAlignment(&Ingredient);
9915   bool CreateGatherScatter = !Consecutive;
9916 
9917   auto &Builder = State.Builder;
9918   InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
9919   bool isMaskRequired = getMask();
9920   if (isMaskRequired)
9921     for (unsigned Part = 0; Part < State.UF; ++Part)
9922       BlockInMaskParts[Part] = State.get(getMask(), Part);
9923 
9924   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
9925     // Calculate the pointer for the specific unroll-part.
9926     GetElementPtrInst *PartPtr = nullptr;
9927 
9928     bool InBounds = false;
9929     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
9930       InBounds = gep->isInBounds();
9931     if (Reverse) {
9932       // If the address is consecutive but reversed, then the
9933       // wide store needs to start at the last vector element.
9934       // RunTimeVF =  VScale * VF.getKnownMinValue()
9935       // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue()
9936       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
9937       // NumElt = -Part * RunTimeVF
9938       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
9939       // LastLane = 1 - RunTimeVF
9940       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
9941       PartPtr =
9942           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
9943       PartPtr->setIsInBounds(InBounds);
9944       PartPtr = cast<GetElementPtrInst>(
9945           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
9946       PartPtr->setIsInBounds(InBounds);
9947       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
9948         BlockInMaskParts[Part] =
9949             Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse");
9950     } else {
9951       Value *Increment =
9952           createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part);
9953       PartPtr = cast<GetElementPtrInst>(
9954           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
9955       PartPtr->setIsInBounds(InBounds);
9956     }
9957 
9958     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
9959     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
9960   };
9961 
9962   // Handle Stores:
9963   if (SI) {
9964     State.ILV->setDebugLocFromInst(SI);
9965 
9966     for (unsigned Part = 0; Part < State.UF; ++Part) {
9967       Instruction *NewSI = nullptr;
9968       Value *StoredVal = State.get(StoredValue, Part);
9969       if (CreateGatherScatter) {
9970         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
9971         Value *VectorGep = State.get(getAddr(), Part);
9972         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
9973                                             MaskPart);
9974       } else {
9975         if (Reverse) {
9976           // If we store to reverse consecutive memory locations, then we need
9977           // to reverse the order of elements in the stored value.
9978           StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
9979           // We don't want to update the value in the map as it might be used in
9980           // another expression. So don't call resetVectorValue(StoredVal).
9981         }
9982         auto *VecPtr =
9983             CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
9984         if (isMaskRequired)
9985           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
9986                                             BlockInMaskParts[Part]);
9987         else
9988           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
9989       }
9990       State.ILV->addMetadata(NewSI, SI);
9991     }
9992     return;
9993   }
9994 
9995   // Handle loads.
9996   assert(LI && "Must have a load instruction");
9997   State.ILV->setDebugLocFromInst(LI);
9998   for (unsigned Part = 0; Part < State.UF; ++Part) {
9999     Value *NewLI;
10000     if (CreateGatherScatter) {
10001       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
10002       Value *VectorGep = State.get(getAddr(), Part);
10003       NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
10004                                          nullptr, "wide.masked.gather");
10005       State.ILV->addMetadata(NewLI, LI);
10006     } else {
10007       auto *VecPtr =
10008           CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
10009       if (isMaskRequired)
10010         NewLI = Builder.CreateMaskedLoad(
10011             DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
10012             PoisonValue::get(DataTy), "wide.masked.load");
10013       else
10014         NewLI =
10015             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
10016 
10017       // Add metadata to the load, but setVectorValue to the reverse shuffle.
10018       State.ILV->addMetadata(NewLI, LI);
10019       if (Reverse)
10020         NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
10021     }
10022 
10023     State.set(this, NewLI, Part);
10024   }
10025 }
10026 
10027 // Determine how to lower the scalar epilogue, which depends on 1) optimising
10028 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
10029 // predication, and 4) a TTI hook that analyses whether the loop is suitable
10030 // for predication.
10031 static ScalarEpilogueLowering getScalarEpilogueLowering(
10032     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
10033     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
10034     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
10035     LoopVectorizationLegality &LVL) {
10036   // 1) OptSize takes precedence over all other options, i.e. if this is set,
10037   // don't look at hints or options, and don't request a scalar epilogue.
10038   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
10039   // LoopAccessInfo (due to code dependency and not being able to reliably get
10040   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
10041   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
10042   // versioning when the vectorization is forced, unlike hasOptSize. So revert
10043   // back to the old way and vectorize with versioning when forced. See D81345.)
10044   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
10045                                                       PGSOQueryType::IRPass) &&
10046                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
10047     return CM_ScalarEpilogueNotAllowedOptSize;
10048 
10049   // 2) If set, obey the directives
10050   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
10051     switch (PreferPredicateOverEpilogue) {
10052     case PreferPredicateTy::ScalarEpilogue:
10053       return CM_ScalarEpilogueAllowed;
10054     case PreferPredicateTy::PredicateElseScalarEpilogue:
10055       return CM_ScalarEpilogueNotNeededUsePredicate;
10056     case PreferPredicateTy::PredicateOrDontVectorize:
10057       return CM_ScalarEpilogueNotAllowedUsePredicate;
10058     };
10059   }
10060 
10061   // 3) If set, obey the hints
10062   switch (Hints.getPredicate()) {
10063   case LoopVectorizeHints::FK_Enabled:
10064     return CM_ScalarEpilogueNotNeededUsePredicate;
10065   case LoopVectorizeHints::FK_Disabled:
10066     return CM_ScalarEpilogueAllowed;
10067   };
10068 
10069   // 4) if the TTI hook indicates this is profitable, request predication.
10070   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
10071                                        LVL.getLAI()))
10072     return CM_ScalarEpilogueNotNeededUsePredicate;
10073 
10074   return CM_ScalarEpilogueAllowed;
10075 }
10076 
10077 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
10078   // If Values have been set for this Def return the one relevant for \p Part.
10079   if (hasVectorValue(Def, Part))
10080     return Data.PerPartOutput[Def][Part];
10081 
10082   if (!hasScalarValue(Def, {Part, 0})) {
10083     Value *IRV = Def->getLiveInIRValue();
10084     Value *B = ILV->getBroadcastInstrs(IRV);
10085     set(Def, B, Part);
10086     return B;
10087   }
10088 
10089   Value *ScalarValue = get(Def, {Part, 0});
10090   // If we aren't vectorizing, we can just copy the scalar map values over
10091   // to the vector map.
10092   if (VF.isScalar()) {
10093     set(Def, ScalarValue, Part);
10094     return ScalarValue;
10095   }
10096 
10097   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
10098   bool IsUniform = RepR && RepR->isUniform();
10099 
10100   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
10101   // Check if there is a scalar value for the selected lane.
10102   if (!hasScalarValue(Def, {Part, LastLane})) {
10103     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
10104     assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) ||
10105             isa<VPScalarIVStepsRecipe>(Def->getDef())) &&
10106            "unexpected recipe found to be invariant");
10107     IsUniform = true;
10108     LastLane = 0;
10109   }
10110 
10111   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
10112   // Set the insert point after the last scalarized instruction or after the
10113   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
10114   // will directly follow the scalar definitions.
10115   auto OldIP = Builder.saveIP();
10116   auto NewIP =
10117       isa<PHINode>(LastInst)
10118           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
10119           : std::next(BasicBlock::iterator(LastInst));
10120   Builder.SetInsertPoint(&*NewIP);
10121 
10122   // However, if we are vectorizing, we need to construct the vector values.
10123   // If the value is known to be uniform after vectorization, we can just
10124   // broadcast the scalar value corresponding to lane zero for each unroll
10125   // iteration. Otherwise, we construct the vector values using
10126   // insertelement instructions. Since the resulting vectors are stored in
10127   // State, we will only generate the insertelements once.
10128   Value *VectorValue = nullptr;
10129   if (IsUniform) {
10130     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
10131     set(Def, VectorValue, Part);
10132   } else {
10133     // Initialize packing with insertelements to start from undef.
10134     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
10135     Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
10136     set(Def, Undef, Part);
10137     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
10138       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
10139     VectorValue = get(Def, Part);
10140   }
10141   Builder.restoreIP(OldIP);
10142   return VectorValue;
10143 }
10144 
10145 // Process the loop in the VPlan-native vectorization path. This path builds
10146 // VPlan upfront in the vectorization pipeline, which allows to apply
10147 // VPlan-to-VPlan transformations from the very beginning without modifying the
10148 // input LLVM IR.
10149 static bool processLoopInVPlanNativePath(
10150     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
10151     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
10152     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
10153     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
10154     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
10155     LoopVectorizationRequirements &Requirements) {
10156 
10157   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
10158     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
10159     return false;
10160   }
10161   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
10162   Function *F = L->getHeader()->getParent();
10163   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
10164 
10165   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10166       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
10167 
10168   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
10169                                 &Hints, IAI);
10170   // Use the planner for outer loop vectorization.
10171   // TODO: CM is not used at this point inside the planner. Turn CM into an
10172   // optional argument if we don't need it in the future.
10173   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
10174                                Requirements, ORE);
10175 
10176   // Get user vectorization factor.
10177   ElementCount UserVF = Hints.getWidth();
10178 
10179   CM.collectElementTypesForWidening();
10180 
10181   // Plan how to best vectorize, return the best VF and its cost.
10182   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
10183 
10184   // If we are stress testing VPlan builds, do not attempt to generate vector
10185   // code. Masked vector code generation support will follow soon.
10186   // Also, do not attempt to vectorize if no vector code will be produced.
10187   if (VPlanBuildStressTest || EnableVPlanPredication ||
10188       VectorizationFactor::Disabled() == VF)
10189     return false;
10190 
10191   VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10192 
10193   {
10194     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10195                              F->getParent()->getDataLayout());
10196     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
10197                            &CM, BFI, PSI, Checks);
10198     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
10199                       << L->getHeader()->getParent()->getName() << "\"\n");
10200     LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
10201   }
10202 
10203   // Mark the loop as already vectorized to avoid vectorizing again.
10204   Hints.setAlreadyVectorized();
10205   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10206   return true;
10207 }
10208 
10209 // Emit a remark if there are stores to floats that required a floating point
10210 // extension. If the vectorized loop was generated with floating point there
10211 // will be a performance penalty from the conversion overhead and the change in
10212 // the vector width.
10213 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
10214   SmallVector<Instruction *, 4> Worklist;
10215   for (BasicBlock *BB : L->getBlocks()) {
10216     for (Instruction &Inst : *BB) {
10217       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
10218         if (S->getValueOperand()->getType()->isFloatTy())
10219           Worklist.push_back(S);
10220       }
10221     }
10222   }
10223 
10224   // Traverse the floating point stores upwards searching, for floating point
10225   // conversions.
10226   SmallPtrSet<const Instruction *, 4> Visited;
10227   SmallPtrSet<const Instruction *, 4> EmittedRemark;
10228   while (!Worklist.empty()) {
10229     auto *I = Worklist.pop_back_val();
10230     if (!L->contains(I))
10231       continue;
10232     if (!Visited.insert(I).second)
10233       continue;
10234 
10235     // Emit a remark if the floating point store required a floating
10236     // point conversion.
10237     // TODO: More work could be done to identify the root cause such as a
10238     // constant or a function return type and point the user to it.
10239     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
10240       ORE->emit([&]() {
10241         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
10242                                           I->getDebugLoc(), L->getHeader())
10243                << "floating point conversion changes vector width. "
10244                << "Mixed floating point precision requires an up/down "
10245                << "cast that will negatively impact performance.";
10246       });
10247 
10248     for (Use &Op : I->operands())
10249       if (auto *OpI = dyn_cast<Instruction>(Op))
10250         Worklist.push_back(OpI);
10251   }
10252 }
10253 
10254 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
10255     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10256                                !EnableLoopInterleaving),
10257       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10258                               !EnableLoopVectorization) {}
10259 
10260 bool LoopVectorizePass::processLoop(Loop *L) {
10261   assert((EnableVPlanNativePath || L->isInnermost()) &&
10262          "VPlan-native path is not enabled. Only process inner loops.");
10263 
10264 #ifndef NDEBUG
10265   const std::string DebugLocStr = getDebugLocString(L);
10266 #endif /* NDEBUG */
10267 
10268   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
10269                     << L->getHeader()->getParent()->getName() << "' from "
10270                     << DebugLocStr << "\n");
10271 
10272   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
10273 
10274   LLVM_DEBUG(
10275       dbgs() << "LV: Loop hints:"
10276              << " force="
10277              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
10278                      ? "disabled"
10279                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
10280                             ? "enabled"
10281                             : "?"))
10282              << " width=" << Hints.getWidth()
10283              << " interleave=" << Hints.getInterleave() << "\n");
10284 
10285   // Function containing loop
10286   Function *F = L->getHeader()->getParent();
10287 
10288   // Looking at the diagnostic output is the only way to determine if a loop
10289   // was vectorized (other than looking at the IR or machine code), so it
10290   // is important to generate an optimization remark for each loop. Most of
10291   // these messages are generated as OptimizationRemarkAnalysis. Remarks
10292   // generated as OptimizationRemark and OptimizationRemarkMissed are
10293   // less verbose reporting vectorized loops and unvectorized loops that may
10294   // benefit from vectorization, respectively.
10295 
10296   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
10297     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
10298     return false;
10299   }
10300 
10301   PredicatedScalarEvolution PSE(*SE, *L);
10302 
10303   // Check if it is legal to vectorize the loop.
10304   LoopVectorizationRequirements Requirements;
10305   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
10306                                 &Requirements, &Hints, DB, AC, BFI, PSI);
10307   if (!LVL.canVectorize(EnableVPlanNativePath)) {
10308     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
10309     Hints.emitRemarkWithHints();
10310     return false;
10311   }
10312 
10313   // Check the function attributes and profiles to find out if this function
10314   // should be optimized for size.
10315   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10316       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
10317 
10318   // Entrance to the VPlan-native vectorization path. Outer loops are processed
10319   // here. They may require CFG and instruction level transformations before
10320   // even evaluating whether vectorization is profitable. Since we cannot modify
10321   // the incoming IR, we need to build VPlan upfront in the vectorization
10322   // pipeline.
10323   if (!L->isInnermost())
10324     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
10325                                         ORE, BFI, PSI, Hints, Requirements);
10326 
10327   assert(L->isInnermost() && "Inner loop expected.");
10328 
10329   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
10330   // count by optimizing for size, to minimize overheads.
10331   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
10332   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
10333     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
10334                       << "This loop is worth vectorizing only if no scalar "
10335                       << "iteration overheads are incurred.");
10336     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
10337       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
10338     else {
10339       LLVM_DEBUG(dbgs() << "\n");
10340       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
10341     }
10342   }
10343 
10344   // Check the function attributes to see if implicit floats are allowed.
10345   // FIXME: This check doesn't seem possibly correct -- what if the loop is
10346   // an integer loop and the vector instructions selected are purely integer
10347   // vector instructions?
10348   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
10349     reportVectorizationFailure(
10350         "Can't vectorize when the NoImplicitFloat attribute is used",
10351         "loop not vectorized due to NoImplicitFloat attribute",
10352         "NoImplicitFloat", ORE, L);
10353     Hints.emitRemarkWithHints();
10354     return false;
10355   }
10356 
10357   // Check if the target supports potentially unsafe FP vectorization.
10358   // FIXME: Add a check for the type of safety issue (denormal, signaling)
10359   // for the target we're vectorizing for, to make sure none of the
10360   // additional fp-math flags can help.
10361   if (Hints.isPotentiallyUnsafe() &&
10362       TTI->isFPVectorizationPotentiallyUnsafe()) {
10363     reportVectorizationFailure(
10364         "Potentially unsafe FP op prevents vectorization",
10365         "loop not vectorized due to unsafe FP support.",
10366         "UnsafeFP", ORE, L);
10367     Hints.emitRemarkWithHints();
10368     return false;
10369   }
10370 
10371   bool AllowOrderedReductions;
10372   // If the flag is set, use that instead and override the TTI behaviour.
10373   if (ForceOrderedReductions.getNumOccurrences() > 0)
10374     AllowOrderedReductions = ForceOrderedReductions;
10375   else
10376     AllowOrderedReductions = TTI->enableOrderedReductions();
10377   if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
10378     ORE->emit([&]() {
10379       auto *ExactFPMathInst = Requirements.getExactFPInst();
10380       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10381                                                  ExactFPMathInst->getDebugLoc(),
10382                                                  ExactFPMathInst->getParent())
10383              << "loop not vectorized: cannot prove it is safe to reorder "
10384                 "floating-point operations";
10385     });
10386     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10387                          "reorder floating-point operations\n");
10388     Hints.emitRemarkWithHints();
10389     return false;
10390   }
10391 
10392   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
10393   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
10394 
10395   // If an override option has been passed in for interleaved accesses, use it.
10396   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
10397     UseInterleaved = EnableInterleavedMemAccesses;
10398 
10399   // Analyze interleaved memory accesses.
10400   if (UseInterleaved) {
10401     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
10402   }
10403 
10404   // Use the cost model.
10405   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10406                                 F, &Hints, IAI);
10407   CM.collectValuesToIgnore();
10408   CM.collectElementTypesForWidening();
10409 
10410   // Use the planner for vectorization.
10411   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
10412                                Requirements, ORE);
10413 
10414   // Get user vectorization factor and interleave count.
10415   ElementCount UserVF = Hints.getWidth();
10416   unsigned UserIC = Hints.getInterleave();
10417 
10418   // Plan how to best vectorize, return the best VF and its cost.
10419   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10420 
10421   VectorizationFactor VF = VectorizationFactor::Disabled();
10422   unsigned IC = 1;
10423 
10424   if (MaybeVF) {
10425     VF = *MaybeVF;
10426     // Select the interleave count.
10427     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10428   }
10429 
10430   // Identify the diagnostic messages that should be produced.
10431   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10432   bool VectorizeLoop = true, InterleaveLoop = true;
10433   if (VF.Width.isScalar()) {
10434     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10435     VecDiagMsg = std::make_pair(
10436         "VectorizationNotBeneficial",
10437         "the cost-model indicates that vectorization is not beneficial");
10438     VectorizeLoop = false;
10439   }
10440 
10441   if (!MaybeVF && UserIC > 1) {
10442     // Tell the user interleaving was avoided up-front, despite being explicitly
10443     // requested.
10444     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10445                          "interleaving should be avoided up front\n");
10446     IntDiagMsg = std::make_pair(
10447         "InterleavingAvoided",
10448         "Ignoring UserIC, because interleaving was avoided up front");
10449     InterleaveLoop = false;
10450   } else if (IC == 1 && UserIC <= 1) {
10451     // Tell the user interleaving is not beneficial.
10452     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10453     IntDiagMsg = std::make_pair(
10454         "InterleavingNotBeneficial",
10455         "the cost-model indicates that interleaving is not beneficial");
10456     InterleaveLoop = false;
10457     if (UserIC == 1) {
10458       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10459       IntDiagMsg.second +=
10460           " and is explicitly disabled or interleave count is set to 1";
10461     }
10462   } else if (IC > 1 && UserIC == 1) {
10463     // Tell the user interleaving is beneficial, but it explicitly disabled.
10464     LLVM_DEBUG(
10465         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10466     IntDiagMsg = std::make_pair(
10467         "InterleavingBeneficialButDisabled",
10468         "the cost-model indicates that interleaving is beneficial "
10469         "but is explicitly disabled or interleave count is set to 1");
10470     InterleaveLoop = false;
10471   }
10472 
10473   // Override IC if user provided an interleave count.
10474   IC = UserIC > 0 ? UserIC : IC;
10475 
10476   // Emit diagnostic messages, if any.
10477   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10478   if (!VectorizeLoop && !InterleaveLoop) {
10479     // Do not vectorize or interleaving the loop.
10480     ORE->emit([&]() {
10481       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10482                                       L->getStartLoc(), L->getHeader())
10483              << VecDiagMsg.second;
10484     });
10485     ORE->emit([&]() {
10486       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10487                                       L->getStartLoc(), L->getHeader())
10488              << IntDiagMsg.second;
10489     });
10490     return false;
10491   } else if (!VectorizeLoop && InterleaveLoop) {
10492     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10493     ORE->emit([&]() {
10494       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10495                                         L->getStartLoc(), L->getHeader())
10496              << VecDiagMsg.second;
10497     });
10498   } else if (VectorizeLoop && !InterleaveLoop) {
10499     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10500                       << ") in " << DebugLocStr << '\n');
10501     ORE->emit([&]() {
10502       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10503                                         L->getStartLoc(), L->getHeader())
10504              << IntDiagMsg.second;
10505     });
10506   } else if (VectorizeLoop && InterleaveLoop) {
10507     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10508                       << ") in " << DebugLocStr << '\n');
10509     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10510   }
10511 
10512   bool DisableRuntimeUnroll = false;
10513   MDNode *OrigLoopID = L->getLoopID();
10514   {
10515     // Optimistically generate runtime checks. Drop them if they turn out to not
10516     // be profitable. Limit the scope of Checks, so the cleanup happens
10517     // immediately after vector codegeneration is done.
10518     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10519                              F->getParent()->getDataLayout());
10520     if (!VF.Width.isScalar() || IC > 1)
10521       Checks.Create(L, *LVL.getLAI(), PSE.getPredicate());
10522 
10523     using namespace ore;
10524     if (!VectorizeLoop) {
10525       assert(IC > 1 && "interleave count should not be 1 or 0");
10526       // If we decided that it is not legal to vectorize the loop, then
10527       // interleave it.
10528       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10529                                  &CM, BFI, PSI, Checks);
10530 
10531       VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10532       LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);
10533 
10534       ORE->emit([&]() {
10535         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10536                                   L->getHeader())
10537                << "interleaved loop (interleaved count: "
10538                << NV("InterleaveCount", IC) << ")";
10539       });
10540     } else {
10541       // If we decided that it is *legal* to vectorize the loop, then do it.
10542 
10543       // Consider vectorizing the epilogue too if it's profitable.
10544       VectorizationFactor EpilogueVF =
10545           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10546       if (EpilogueVF.Width.isVector()) {
10547 
10548         // The first pass vectorizes the main loop and creates a scalar epilogue
10549         // to be vectorized by executing the plan (potentially with a different
10550         // factor) again shortly afterwards.
10551         EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
10552         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10553                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10554 
10555         VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
10556         LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
10557                         DT);
10558         ++LoopsVectorized;
10559 
10560         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10561         formLCSSARecursively(*L, *DT, LI, SE);
10562 
10563         // Second pass vectorizes the epilogue and adjusts the control flow
10564         // edges from the first pass.
10565         EPI.MainLoopVF = EPI.EpilogueVF;
10566         EPI.MainLoopUF = EPI.EpilogueUF;
10567         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10568                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10569                                                  Checks);
10570 
10571         VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
10572         BestEpiPlan.getVectorLoopRegion()->getEntryBasicBlock()->setName(
10573             "vec.epilog.vector.body");
10574 
10575         // Ensure that the start values for any VPReductionPHIRecipes are
10576         // updated before vectorising the epilogue loop.
10577         VPBasicBlock *Header =
10578             BestEpiPlan.getVectorLoopRegion()->getEntryBasicBlock();
10579         for (VPRecipeBase &R : Header->phis()) {
10580           if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
10581             if (auto *Resume = MainILV.getReductionResumeValue(
10582                     ReductionPhi->getRecurrenceDescriptor())) {
10583               VPValue *StartVal = new VPValue(Resume);
10584               BestEpiPlan.addExternalDef(StartVal);
10585               ReductionPhi->setOperand(0, StartVal);
10586             }
10587           }
10588         }
10589 
10590         LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
10591                         DT);
10592         ++LoopsEpilogueVectorized;
10593 
10594         if (!MainILV.areSafetyChecksAdded())
10595           DisableRuntimeUnroll = true;
10596       } else {
10597         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10598                                &LVL, &CM, BFI, PSI, Checks);
10599 
10600         VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10601         LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
10602         ++LoopsVectorized;
10603 
10604         // Add metadata to disable runtime unrolling a scalar loop when there
10605         // are no runtime checks about strides and memory. A scalar loop that is
10606         // rarely used is not worth unrolling.
10607         if (!LB.areSafetyChecksAdded())
10608           DisableRuntimeUnroll = true;
10609       }
10610       // Report the vectorization decision.
10611       ORE->emit([&]() {
10612         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10613                                   L->getHeader())
10614                << "vectorized loop (vectorization width: "
10615                << NV("VectorizationFactor", VF.Width)
10616                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10617       });
10618     }
10619 
10620     if (ORE->allowExtraAnalysis(LV_NAME))
10621       checkMixedPrecision(L, ORE);
10622   }
10623 
10624   Optional<MDNode *> RemainderLoopID =
10625       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10626                                       LLVMLoopVectorizeFollowupEpilogue});
10627   if (RemainderLoopID.hasValue()) {
10628     L->setLoopID(RemainderLoopID.getValue());
10629   } else {
10630     if (DisableRuntimeUnroll)
10631       AddRuntimeUnrollDisableMetaData(L);
10632 
10633     // Mark the loop as already vectorized to avoid vectorizing again.
10634     Hints.setAlreadyVectorized();
10635   }
10636 
10637   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10638   return true;
10639 }
10640 
10641 LoopVectorizeResult LoopVectorizePass::runImpl(
10642     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10643     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10644     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10645     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10646     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10647   SE = &SE_;
10648   LI = &LI_;
10649   TTI = &TTI_;
10650   DT = &DT_;
10651   BFI = &BFI_;
10652   TLI = TLI_;
10653   AA = &AA_;
10654   AC = &AC_;
10655   GetLAA = &GetLAA_;
10656   DB = &DB_;
10657   ORE = &ORE_;
10658   PSI = PSI_;
10659 
10660   // Don't attempt if
10661   // 1. the target claims to have no vector registers, and
10662   // 2. interleaving won't help ILP.
10663   //
10664   // The second condition is necessary because, even if the target has no
10665   // vector registers, loop vectorization may still enable scalar
10666   // interleaving.
10667   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10668       TTI->getMaxInterleaveFactor(1) < 2)
10669     return LoopVectorizeResult(false, false);
10670 
10671   bool Changed = false, CFGChanged = false;
10672 
10673   // The vectorizer requires loops to be in simplified form.
10674   // Since simplification may add new inner loops, it has to run before the
10675   // legality and profitability checks. This means running the loop vectorizer
10676   // will simplify all loops, regardless of whether anything end up being
10677   // vectorized.
10678   for (auto &L : *LI)
10679     Changed |= CFGChanged |=
10680         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10681 
10682   // Build up a worklist of inner-loops to vectorize. This is necessary as
10683   // the act of vectorizing or partially unrolling a loop creates new loops
10684   // and can invalidate iterators across the loops.
10685   SmallVector<Loop *, 8> Worklist;
10686 
10687   for (Loop *L : *LI)
10688     collectSupportedLoops(*L, LI, ORE, Worklist);
10689 
10690   LoopsAnalyzed += Worklist.size();
10691 
10692   // Now walk the identified inner loops.
10693   while (!Worklist.empty()) {
10694     Loop *L = Worklist.pop_back_val();
10695 
10696     // For the inner loops we actually process, form LCSSA to simplify the
10697     // transform.
10698     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10699 
10700     Changed |= CFGChanged |= processLoop(L);
10701   }
10702 
10703   // Process each loop nest in the function.
10704   return LoopVectorizeResult(Changed, CFGChanged);
10705 }
10706 
10707 PreservedAnalyses LoopVectorizePass::run(Function &F,
10708                                          FunctionAnalysisManager &AM) {
10709     auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
10710     auto &LI = AM.getResult<LoopAnalysis>(F);
10711     auto &TTI = AM.getResult<TargetIRAnalysis>(F);
10712     auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
10713     auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
10714     auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
10715     auto &AA = AM.getResult<AAManager>(F);
10716     auto &AC = AM.getResult<AssumptionAnalysis>(F);
10717     auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
10718     auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
10719 
10720     auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
10721     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
10722         [&](Loop &L) -> const LoopAccessInfo & {
10723       LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
10724                                         TLI, TTI, nullptr, nullptr, nullptr};
10725       return LAM.getResult<LoopAccessAnalysis>(L, AR);
10726     };
10727     auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10728     ProfileSummaryInfo *PSI =
10729         MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10730     LoopVectorizeResult Result =
10731         runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
10732     if (!Result.MadeAnyChange)
10733       return PreservedAnalyses::all();
10734     PreservedAnalyses PA;
10735 
10736     // We currently do not preserve loopinfo/dominator analyses with outer loop
10737     // vectorization. Until this is addressed, mark these analyses as preserved
10738     // only for non-VPlan-native path.
10739     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
10740     if (!EnableVPlanNativePath) {
10741       PA.preserve<LoopAnalysis>();
10742       PA.preserve<DominatorTreeAnalysis>();
10743     }
10744 
10745     if (Result.MadeCFGChange) {
10746       // Making CFG changes likely means a loop got vectorized. Indicate that
10747       // extra simplification passes should be run.
10748       // TODO: MadeCFGChanges is not a prefect proxy. Extra passes should only
10749       // be run if runtime checks have been added.
10750       AM.getResult<ShouldRunExtraVectorPasses>(F);
10751       PA.preserve<ShouldRunExtraVectorPasses>();
10752     } else {
10753       PA.preserveSet<CFGAnalyses>();
10754     }
10755     return PA;
10756 }
10757 
10758 void LoopVectorizePass::printPipeline(
10759     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10760   static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10761       OS, MapClassName2PassName);
10762 
10763   OS << "<";
10764   OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10765   OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10766   OS << ">";
10767 }
10768