//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
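//
// For example, with VF = 4 a loop such as
//   for (i = 0; i < n; i += 1) A[i] = B[i] + K;
// is conceptually rewritten so that each wide iteration computes
//   A[i:i+3] = B[i:i+3] + K;
// and increments the index by 4, with any leftover iterations handled by a
// scalar epilogue loop.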
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC - two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

// The option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired and that predication is preferred: the vectorizer will try to fold
// the tail loop (epilogue) into the vector body and predicate the instructions
// accordingly. If tail-folding fails, the values below select different
// fallback strategies:
namespace PreferPredicateTy {
  enum Option {
    ScalarEpilogue = 0,
    PredicateElseScalarEpilogue,
    PredicateOrDontVectorize
  };
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "Prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "Prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));
235 
236 static cl::opt<bool> MaximizeBandwidth(
237     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
238     cl::desc("Maximize bandwidth when selecting vectorization factor which "
239              "will be determined by the smallest type in loop."));
240 
241 static cl::opt<bool> EnableInterleavedMemAccesses(
242     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
243     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
244 
245 /// An interleave-group may need masking if it resides in a block that needs
246 /// predication, or in order to mask away gaps.
247 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
248     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a "
             "loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> ForceOrderedReductions(
    "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
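  // For example, on typical x86 data layouts x86_fp80 stores 80 bits but is
  // allocated 96 or 128 bits, so it is irregular; i32 stores and allocates
  // 32 bits, so it is regular.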
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
///       we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

AnalysisKey ShouldRunExtraVectorPasses::Key;

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the profile
    // of the original loop header may change as the transformation happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop and the start value for the canonical induction, if it is != 0. The
  /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
  /// complex control flow around the loops.
  virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State, VPlan &Plan);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single vector PHINode in a block in the VPlan-native path
  /// only.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Set the debug location in the builder \p CustomBuilder using the debug
  /// location in \p V. If \p CustomBuilder is None, the class member Builder
  /// is used.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilderBase *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we are
  /// able to vectorize with strict in-order reductions for the given RdxDesc.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata).  Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  // Returns the resume value (bc.merge.rdx) for a reduction as
  // generated by fixReduction.
  PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
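  /// For example, with UF = 2 and VF = 4, a single scalarized value is held as
  /// 2 groups of 4 scalar Values, one group per unrolled part.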
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *VectorTripCount, Value *EndValue,
                    BasicBlock *MiddleBlock, BasicBlock *VectorHeader);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
                               VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(BasicBlock *InsertBlock);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(BasicBlock *InsertBlock);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass);

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader.
  void createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration count
  /// in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (eg. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks.  Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Collect poison-generating recipes that may generate a poison value that is
  /// used after vectorization, even when their operands are not poison. Those
  /// recipes meet the following conditions:
  ///  * Contribute to the address computation of a recipe generating a widen
  ///    memory load/store (VPWidenMemoryInstructionRecipe or
  ///    VPInterleaveRecipe).
  ///  * Such a widen memory load/store has at least one underlying Instruction
  ///    that is in a basic block that needs predication and after vectorization
  ///    the generated instruction won't be predicated.
  void collectPoisonGeneratingRecipes(VPTransformState &State);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart() {}
  virtual void printDebugTracesAtEnd() {}

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning.  It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists.  Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
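  /// For example, TripCount = 1003 with VF = 4 and UF = 2 gives
  /// VectorTripCount = 1003 - 1003 % 8 = 1000.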
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile-guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;

  // Holds the resume values for reductions in the loops, used to set the
  // correct start value of reduction PHIs when vectorizing the epilogue.
  SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
      ReductionResumeValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue.  This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  std::pair<BasicBlock *, Value *>
  createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (ie the first pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *epilogue* loops in the process of vectorizing loops and
/// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {
    TripCount = EPI.TripCount;
  }
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (ie the second pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilderBase *> CustomBuilder) {
  IRBuilderBase *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When an FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
/// RemarkName is the identifier for the remark.  If \p I is passed it is an
/// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
/// the location of the remark.  \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis
createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                 Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

namespace llvm {

/// Return a value for Step multiplied by VF.
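/// For a fixed VF this folds to the constant Step * VF; for a scalable VF it
/// emits code to compute vscale * (Step * VF) at runtime.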
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
                       int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
                                  ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {

  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec) ||
          isa<VPScalarIVStepsRecipe>(CurRec) ||
          isa<VPCanonicalIVPHIRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. Collect recipe if its underlying instruction has
      // poison-generating flags.
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && Instr->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);

      // Add new definitions to the worklist.
      for (VPValue *Operand : CurRec->operands())
        if (VPDef *OpDef = Operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
            Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

PHINode *InnerLoopVectorizer::getReductionResumeValue(
    const RecurrenceDescriptor &RdxDesc) {
  auto It = ReductionResumeValues.find(&RdxDesc);
  assert(It != ReductionResumeValues.end() &&
         "Expected to find a resume value for the reduction.");
  return It->second;
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
1198   // trip count are considered for vectorization under OptForSize, thereby
1199   // making sure the cost of their loop body is dominant, free of runtime
1200   // guards and scalar iteration overheads.
1201   CM_ScalarEpilogueNotAllowedLowTripLoop,
1202 
1203   // Loop hint predicate indicating an epilogue is undesired.
1204   CM_ScalarEpilogueNotNeededUsePredicate,
1205 
1206   // Directive indicating we must either tail fold or not vectorize
1207   CM_ScalarEpilogueNotAllowedUsePredicate
1208 };
1209 
1210 /// ElementCountComparator creates a total ordering for ElementCount
1211 /// for the purposes of using it in a set structure.
1212 struct ElementCountComparator {
1213   bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1214     return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1215            std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1216   }
1217 };
1218 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
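
// Illustrative note: the tuple comparison above orders all fixed element
// counts before all scalable ones (false < true for isScalable()), each group
// sorted by ascending minimum value, e.g.:
//   ElementCount::getFixed(2) < ElementCount::getFixed(8) <
//   ElementCount::getScalable(2) < ElementCount::getScalable(4)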
1219 
1220 /// LoopVectorizationCostModel - estimates the expected speedups due to
1221 /// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the
1224 /// expected speedup/slowdowns due to the supported instruction set. We use the
1225 /// TargetTransformInfo to query the different backends for the cost of
1226 /// different operations.
1227 class LoopVectorizationCostModel {
1228 public:
1229   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1230                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1231                              LoopVectorizationLegality *Legal,
1232                              const TargetTransformInfo &TTI,
1233                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1234                              AssumptionCache *AC,
1235                              OptimizationRemarkEmitter *ORE, const Function *F,
1236                              const LoopVectorizeHints *Hints,
1237                              InterleavedAccessInfo &IAI)
1238       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1239         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1240         Hints(Hints), InterleaveInfo(IAI) {}
1241 
1242   /// \return An upper bound for the vectorization factors (both fixed and
1243   /// scalable). If the factors are 0, vectorization and interleaving should be
1244   /// avoided up front.
1245   FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1246 
1247   /// \return True if runtime checks are required for vectorization, and false
1248   /// otherwise.
1249   bool runtimeChecksRequired();
1250 
1251   /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not zero
  /// then it will be selected as the vectorization factor, provided
  /// vectorization is possible.
1255   VectorizationFactor
1256   selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1257 
1258   VectorizationFactor
1259   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1260                                     const LoopVectorizationPlanner &LVP);
1261 
1262   /// Setup cost-based decisions for user vectorization factor.
1263   /// \return true if the UserVF is a feasible VF to be chosen.
1264   bool selectUserVectorizationFactor(ElementCount UserVF) {
1265     collectUniformsAndScalars(UserVF);
1266     collectInstsToScalarize(UserVF);
1267     return expectedCost(UserVF).first.isValid();
1268   }
1269 
1270   /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar, such as
  /// 64-bit loop indices.
1273   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1274 
1275   /// \return The desired interleave count.
1276   /// If interleave count has been specified by metadata it will be returned.
1277   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1278   /// are the selected vectorization factor and the cost of the selected VF.
1279   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1280 
  /// A memory access instruction may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
1288   void setCostBasedWideningDecision(ElementCount VF);
1289 
1290   /// A struct that represents some properties of the register usage
1291   /// of a loop.
1292   struct RegisterUsage {
1293     /// Holds the number of loop invariant values that are used in the loop.
1294     /// The key is ClassID of target-provided register class.
1295     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1296     /// Holds the maximum number of concurrent live intervals in the loop.
1297     /// The key is ClassID of target-provided register class.
1298     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1299   };
1300 
  /// \return Information about the register usage of the loop for the given
  /// vectorization factors.
1303   SmallVector<RegisterUsage, 8>
1304   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1305 
1306   /// Collect values we want to ignore in the cost model.
1307   void collectValuesToIgnore();
1308 
1309   /// Collect all element types in the loop for which widening is needed.
1310   void collectElementTypesForWidening();
1311 
  /// Split reductions into those that happen in the loop, and those that happen
  /// outside. In-loop reductions are collected into InLoopReductionChains.
1314   void collectInLoopReductions();
1315 
1316   /// Returns true if we should use strict in-order reductions for the given
1317   /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
1318   /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
1319   /// of FP operations.
1320   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1321     return !Hints->allowReordering() && RdxDesc.isOrdered();
1322   }
1323 
1324   /// \returns The smallest bitwidth each instruction can be represented with.
1325   /// The vector equivalents of these instructions should be truncated to this
1326   /// type.
1327   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1328     return MinBWs;
1329   }
1330 
1331   /// \returns True if it is more profitable to scalarize instruction \p I for
1332   /// vectorization factor \p VF.
1333   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1334     assert(VF.isVector() &&
1335            "Profitable to scalarize relevant only for VF > 1.");
1336 
1337     // Cost model is not run in the VPlan-native path - return conservative
1338     // result until this changes.
1339     if (EnableVPlanNativePath)
1340       return false;
1341 
1342     auto Scalars = InstsToScalarize.find(VF);
1343     assert(Scalars != InstsToScalarize.end() &&
1344            "VF not yet analyzed for scalarization profitability");
1345     return Scalars->second.find(I) != Scalars->second.end();
1346   }
1347 
1348   /// Returns true if \p I is known to be uniform after vectorization.
1349   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1350     if (VF.isScalar())
1351       return true;
1352 
1353     // Cost model is not run in the VPlan-native path - return conservative
1354     // result until this changes.
1355     if (EnableVPlanNativePath)
1356       return false;
1357 
1358     auto UniformsPerVF = Uniforms.find(VF);
1359     assert(UniformsPerVF != Uniforms.end() &&
1360            "VF not yet analyzed for uniformity");
1361     return UniformsPerVF->second.count(I);
1362   }
1363 
1364   /// Returns true if \p I is known to be scalar after vectorization.
1365   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1366     if (VF.isScalar())
1367       return true;
1368 
1369     // Cost model is not run in the VPlan-native path - return conservative
1370     // result until this changes.
1371     if (EnableVPlanNativePath)
1372       return false;
1373 
1374     auto ScalarsPerVF = Scalars.find(VF);
1375     assert(ScalarsPerVF != Scalars.end() &&
1376            "Scalar values are not calculated for VF");
1377     return ScalarsPerVF->second.count(I);
1378   }
1379 
1380   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1381   /// for vectorization factor \p VF.
1382   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1383     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1384            !isProfitableToScalarize(I, VF) &&
1385            !isScalarAfterVectorization(I, VF);
1386   }
1387 
  /// Decision that was taken during cost calculation for a memory instruction.
1389   enum InstWidening {
1390     CM_Unknown,
1391     CM_Widen,         // For consecutive accesses with stride +1.
1392     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1393     CM_Interleave,
1394     CM_GatherScatter,
1395     CM_Scalarize
1396   };
1397 
1398   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1399   /// instruction \p I and vector width \p VF.
1400   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1401                            InstructionCost Cost) {
1402     assert(VF.isVector() && "Expected VF >=2");
1403     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1404   }
1405 
1406   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1407   /// interleaving group \p Grp and vector width \p VF.
1408   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1409                            ElementCount VF, InstWidening W,
1410                            InstructionCost Cost) {
1411     assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group,
    // but assign the cost to one instruction only.
1414     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1415       if (auto *I = Grp->getMember(i)) {
1416         if (Grp->getInsertPos() == I)
1417           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1418         else
1419           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1420       }
1421     }
1422   }
1423 
1424   /// Return the cost model decision for the given instruction \p I and vector
1425   /// width \p VF. Return CM_Unknown if this instruction did not pass
1426   /// through the cost modeling.
1427   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1428     assert(VF.isVector() && "Expected VF to be a vector VF");
1429     // Cost model is not run in the VPlan-native path - return conservative
1430     // result until this changes.
1431     if (EnableVPlanNativePath)
1432       return CM_GatherScatter;
1433 
1434     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1435     auto Itr = WideningDecisions.find(InstOnVF);
1436     if (Itr == WideningDecisions.end())
1437       return CM_Unknown;
1438     return Itr->second.first;
1439   }
1440 
1441   /// Return the vectorization cost for the given instruction \p I and vector
1442   /// width \p VF.
1443   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1444     assert(VF.isVector() && "Expected VF >=2");
1445     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1446     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1447            "The cost is not calculated");
1448     return WideningDecisions[InstOnVF].second;
1449   }
1450 
1451   /// Return True if instruction \p I is an optimizable truncate whose operand
1452   /// is an induction variable. Such a truncate will be removed by adding a new
1453   /// induction variable with the destination type.
1454   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1455     // If the instruction is not a truncate, return false.
1456     auto *Trunc = dyn_cast<TruncInst>(I);
1457     if (!Trunc)
1458       return false;
1459 
1460     // Get the source and destination types of the truncate.
1461     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1462     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1463 
1464     // If the truncate is free for the given types, return false. Replacing a
1465     // free truncate with an induction variable would add an induction variable
1466     // update instruction to each iteration of the loop. We exclude from this
1467     // check the primary induction variable since it will need an update
1468     // instruction regardless.
1469     Value *Op = Trunc->getOperand(0);
1470     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1471       return false;
1472 
1473     // If the truncated value is not an induction variable, return false.
1474     return Legal->isInductionPhi(Op);
1475   }
1476 
1477   /// Collects the instructions to scalarize for each predicated instruction in
1478   /// the loop.
1479   void collectInstsToScalarize(ElementCount VF);
1480 
1481   /// Collect Uniform and Scalar values for the given \p VF.
1482   /// The sets depend on CM decision for Load/Store instructions
1483   /// that may be vectorized as interleave, gather-scatter or scalarized.
1484   void collectUniformsAndScalars(ElementCount VF) {
1485     // Do the analysis once.
1486     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1487       return;
1488     setCostBasedWideningDecision(VF);
1489     collectLoopUniforms(VF);
1490     collectLoopScalars(VF);
1491   }
1492 
1493   /// Returns true if the target machine supports masked store operation
1494   /// for the given \p DataType and kind of access to \p Ptr.
1495   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1496     return Legal->isConsecutivePtr(DataType, Ptr) &&
1497            TTI.isLegalMaskedStore(DataType, Alignment);
1498   }
1499 
1500   /// Returns true if the target machine supports masked load operation
1501   /// for the given \p DataType and kind of access to \p Ptr.
1502   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1503     return Legal->isConsecutivePtr(DataType, Ptr) &&
1504            TTI.isLegalMaskedLoad(DataType, Alignment);
1505   }
1506 
1507   /// Returns true if the target machine can represent \p V as a masked gather
1508   /// or scatter operation.
1509   bool isLegalGatherOrScatter(Value *V,
1510                               ElementCount VF = ElementCount::getFixed(1)) {
1511     bool LI = isa<LoadInst>(V);
1512     bool SI = isa<StoreInst>(V);
1513     if (!LI && !SI)
1514       return false;
1515     auto *Ty = getLoadStoreType(V);
1516     Align Align = getLoadStoreAlignment(V);
1517     if (VF.isVector())
1518       Ty = VectorType::get(Ty, VF);
1519     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1520            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1521   }
1522 
1523   /// Returns true if the target machine supports all of the reduction
1524   /// variables found for the given VF.
1525   bool canVectorizeReductions(ElementCount VF) const {
1526     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1527       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1528       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1529     }));
1530   }
1531 
  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication when vectorized with vectorization factor \p VF. Such
1534   /// instructions include conditional stores and instructions that may divide
1535   /// by zero.
1536   bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
1537 
1538   // Returns true if \p I is an instruction that will be predicated either
1539   // through scalar predication or masked load/store or masked gather/scatter.
1540   // \p VF is the vectorization factor that will be used to vectorize \p I.
1541   // Superset of instructions that return true for isScalarWithPredication.
1542   bool isPredicatedInst(Instruction *I, ElementCount VF,
1543                         bool IsKnownUniform = false) {
    // When we know the load is uniform and the original scalar loop was not
    // predicated we don't need to mark it as a predicated instruction. Any
    // vectorized blocks created when tail-folding are artificial blocks we
    // have introduced, and we know there is always at least one active lane
    // in them. That's why we call Legal->blockNeedsPredication here, as it
    // doesn't query tail-folding.
1550     if (IsKnownUniform && isa<LoadInst>(I) &&
1551         !Legal->blockNeedsPredication(I->getParent()))
1552       return false;
1553     if (!blockNeedsPredicationForAnyReason(I->getParent()))
1554       return false;
1555     // Loads and stores that need some form of masked operation are predicated
1556     // instructions.
1557     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1558       return Legal->isMaskRequired(I);
1559     return isScalarWithPredication(I, VF);
1560   }
1561 
1562   /// Returns true if \p I is a memory instruction with consecutive memory
1563   /// access that can be widened.
1564   bool
1565   memoryInstructionCanBeWidened(Instruction *I,
1566                                 ElementCount VF = ElementCount::getFixed(1));
1567 
1568   /// Returns true if \p I is a memory instruction in an interleaved-group
1569   /// of memory accesses that can be vectorized with wide vector loads/stores
1570   /// and shuffles.
1571   bool
1572   interleavedAccessCanBeWidened(Instruction *I,
1573                                 ElementCount VF = ElementCount::getFixed(1));
1574 
1575   /// Check if \p Instr belongs to any interleaved access group.
1576   bool isAccessInterleaved(Instruction *Instr) {
1577     return InterleaveInfo.isInterleaved(Instr);
1578   }
1579 
1580   /// Get the interleaved access group that \p Instr belongs to.
1581   const InterleaveGroup<Instruction> *
1582   getInterleavedAccessGroup(Instruction *Instr) {
1583     return InterleaveInfo.getInterleaveGroup(Instr);
1584   }
1585 
1586   /// Returns true if we're required to use a scalar epilogue for at least
1587   /// the final iteration of the original loop.
1588   bool requiresScalarEpilogue(ElementCount VF) const {
1589     if (!isScalarEpilogueAllowed())
1590       return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
1593     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1594       return true;
1595     return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1596   }
1597 
  /// Returns true if a scalar epilogue is allowed, i.e. it is not disallowed
  /// due to optsize or a loop hint annotation.
1600   bool isScalarEpilogueAllowed() const {
1601     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1602   }
1603 
1604   /// Returns true if all loop blocks should be masked to fold tail loop.
1605   bool foldTailByMasking() const { return FoldTailByMasking; }
1606 
  /// Returns true if the instructions in this block require predication
1608   /// for any reason, e.g. because tail folding now requires a predicate
1609   /// or because the block in the original loop was predicated.
1610   bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1611     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1612   }
1613 
1614   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1615   /// nodes to the chain of instructions representing the reductions. Uses a
1616   /// MapVector to ensure deterministic iteration order.
1617   using ReductionChainMap =
1618       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1619 
1620   /// Return the chain of instructions representing an inloop reduction.
1621   const ReductionChainMap &getInLoopReductionChains() const {
1622     return InLoopReductionChains;
1623   }
1624 
1625   /// Returns true if the Phi is part of an inloop reduction.
1626   bool isInLoopReduction(PHINode *Phi) const {
1627     return InLoopReductionChains.count(Phi);
1628   }
1629 
1630   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1631   /// with factor VF.  Return the cost of the instruction, including
1632   /// scalarization overhead if it's needed.
1633   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1634 
1635   /// Estimate cost of a call instruction CI if it were vectorized with factor
1636   /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
1640   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1641                                     bool &NeedToScalarize) const;
1642 
1643   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1644   /// that of B.
1645   bool isMoreProfitable(const VectorizationFactor &A,
1646                         const VectorizationFactor &B) const;
1647 
1648   /// Invalidates decisions already taken by the cost model.
1649   void invalidateCostModelingDecisions() {
1650     WideningDecisions.clear();
1651     Uniforms.clear();
1652     Scalars.clear();
1653   }
1654 
1655 private:
1656   unsigned NumPredStores = 0;
1657 
  /// Convenience function that returns the value of vscale_range iff
  /// vscale_range.min == vscale_range.max, and otherwise returns the value
  /// returned by the corresponding TTI method.
1661   Optional<unsigned> getVScaleForTuning() const;
1662 
1663   /// \return An upper bound for the vectorization factors for both
1664   /// fixed and scalable vectorization, where the minimum-known number of
1665   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1666   /// disabled or unsupported, then the scalable part will be equal to
1667   /// ElementCount::getScalable(0).
1668   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1669                                            ElementCount UserVF,
1670                                            bool FoldTailByMasking);
1671 
  /// \return the maximized element count based on the target's vector
1673   /// registers and the loop trip-count, but limited to a maximum safe VF.
1674   /// This is a helper function of computeFeasibleMaxVF.
1675   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1676   /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
1678   /// D98509). The issue is currently under investigation and this workaround
1679   /// will be removed as soon as possible.
1680   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1681                                        unsigned SmallestType,
1682                                        unsigned WidestType,
1683                                        const ElementCount &MaxSafeVF,
1684                                        bool FoldTailByMasking);
1685 
1686   /// \return the maximum legal scalable VF, based on the safe max number
1687   /// of elements.
1688   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1689 
1690   /// The vectorization cost is a combination of the cost itself and a boolean
1691   /// indicating whether any of the contributing operations will actually
1692   /// operate on vector values after type legalization in the backend. If this
1693   /// latter value is false, then all operations will be scalarized (i.e. no
1694   /// vectorization has actually taken place).
1695   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1696 
1697   /// Returns the expected execution cost. The unit of the cost does
1698   /// not matter because we use the 'cost' units to compare different
1699   /// vector widths. The cost that is returned is *not* normalized by
1700   /// the factor width. If \p Invalid is not nullptr, this function
1701   /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1702   /// each instruction that has an Invalid cost for the given VF.
1703   using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1704   VectorizationCostTy
1705   expectedCost(ElementCount VF,
1706                SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1707 
1708   /// Returns the execution time cost of an instruction for a given vector
1709   /// width. Vector width of one means scalar.
1710   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1711 
1712   /// The cost-computation logic from getInstructionCost which provides
1713   /// the vector type as an output parameter.
1714   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1715                                      Type *&VectorTy);
1716 
1717   /// Return the cost of instructions in an inloop reduction pattern, if I is
1718   /// part of that pattern.
1719   Optional<InstructionCost>
1720   getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1721                           TTI::TargetCostKind CostKind);
1722 
1723   /// Calculate vectorization cost of memory instruction \p I.
1724   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1725 
1726   /// The cost computation for scalarized memory instruction.
1727   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1728 
1729   /// The cost computation for interleaving group of memory instructions.
1730   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1731 
1732   /// The cost computation for Gather/Scatter instruction.
1733   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1734 
1735   /// The cost computation for widening instruction \p I with consecutive
1736   /// memory access.
1737   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1738 
1739   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1740   /// Load: scalar load + broadcast.
1741   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1742   /// element)
1743   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1744 
1745   /// Estimate the overhead of scalarizing an instruction. This is a
1746   /// convenience wrapper for the type-based getScalarizationOverhead API.
1747   InstructionCost getScalarizationOverhead(Instruction *I,
1748                                            ElementCount VF) const;
1749 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
1752   bool isConsecutiveLoadOrStore(Instruction *I);
1753 
1754   /// Returns true if an artificially high cost for emulated masked memrefs
1755   /// should be used.
1756   bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1757 
1758   /// Map of scalar integer values to the smallest bitwidth they can be legally
1759   /// represented as. The vector equivalents of these values should be truncated
1760   /// to this type.
1761   MapVector<Instruction *, uint64_t> MinBWs;
1762 
1763   /// A type representing the costs for instructions if they were to be
1764   /// scalarized rather than vectorized. The entries are Instruction-Cost
1765   /// pairs.
1766   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1767 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1770   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1771 
1772   /// Records whether it is allowed to have the original scalar loop execute at
1773   /// least once. This may be needed as a fallback loop in case runtime
1774   /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or not divisible by the VF,
1776   /// or as a peel-loop to handle gaps in interleave-groups.
1777   /// Under optsize and when the trip count is very small we don't allow any
1778   /// iterations to execute in the scalar loop.
1779   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1780 
1781   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1782   bool FoldTailByMasking = false;
1783 
1784   /// A map holding scalar costs for different vectorization factors. The
1785   /// presence of a cost for an instruction in the mapping indicates that the
1786   /// instruction will be scalarized when vectorizing with the associated
1787   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1788   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1789 
1790   /// Holds the instructions known to be uniform after vectorization.
1791   /// The data is collected per VF.
1792   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1793 
1794   /// Holds the instructions known to be scalar after vectorization.
1795   /// The data is collected per VF.
1796   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1797 
1798   /// Holds the instructions (address computations) that are forced to be
1799   /// scalarized.
1800   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1801 
1802   /// PHINodes of the reductions that should be expanded in-loop along with
1803   /// their associated chains of reduction operations, in program order from top
  /// (PHI) to bottom.
1805   ReductionChainMap InLoopReductionChains;
1806 
1807   /// A Map of inloop reduction operations and their immediate chain operand.
  /// FIXME: This can be removed once reductions can be costed correctly in
  /// VPlan. This was added to allow quick lookup of the inloop operations,
1810   /// without having to loop through InLoopReductionChains.
1811   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1812 
1813   /// Returns the expected difference in cost from scalarizing the expression
1814   /// feeding a predicated instruction \p PredInst. The instructions to
1815   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1816   /// non-negative return value implies the expression will be scalarized.
1817   /// Currently, only single-use chains are considered for scalarization.
1818   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1819                               ElementCount VF);
1820 
1821   /// Collect the instructions that are uniform after vectorization. An
1822   /// instruction is uniform if we represent it with a single scalar value in
1823   /// the vectorized loop corresponding to each vector iteration. Examples of
1824   /// uniform instructions include pointer operands of consecutive or
1825   /// interleaved memory accesses. Note that although uniformity implies an
1826   /// instruction will be scalar, the reverse is not true. In general, a
1827   /// scalarized instruction will be represented by VF scalar values in the
1828   /// vectorized loop, each corresponding to an iteration of the original
1829   /// scalar loop.
1830   void collectLoopUniforms(ElementCount VF);
1831 
1832   /// Collect the instructions that are scalar after vectorization. An
1833   /// instruction is scalar if it is known to be uniform or will be scalarized
1834   /// during vectorization. collectLoopScalars should only add non-uniform nodes
1835   /// to the list if they are used by a load/store instruction that is marked as
1836   /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1837   /// VF values in the vectorized loop, each corresponding to an iteration of
1838   /// the original scalar loop.
1839   void collectLoopScalars(ElementCount VF);
1840 
1841   /// Keeps cost model vectorization decision and cost for instructions.
1842   /// Right now it is used for memory instructions only.
1843   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1844                                 std::pair<InstWidening, InstructionCost>>;
1845 
1846   DecisionList WideningDecisions;
1847 
1848   /// Returns true if \p V is expected to be vectorized and it needs to be
1849   /// extracted.
1850   bool needsExtract(Value *V, ElementCount VF) const {
1851     Instruction *I = dyn_cast<Instruction>(V);
1852     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1853         TheLoop->isLoopInvariant(I))
1854       return false;
1855 
1856     // Assume we can vectorize V (and hence we need extraction) if the
1857     // scalars are not computed yet. This can happen, because it is called
1858     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1859     // the scalars are collected. That should be a safe assumption in most
1860     // cases, because we check if the operands have vectorizable types
1861     // beforehand in LoopVectorizationLegality.
1862     return Scalars.find(VF) == Scalars.end() ||
1863            !isScalarAfterVectorization(I, VF);
1864   };
1865 
1866   /// Returns a range containing only operands needing to be extracted.
1867   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1868                                                    ElementCount VF) const {
1869     return SmallVector<Value *, 4>(make_filter_range(
1870         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1871   }
1872 
1873   /// Determines if we have the infrastructure to vectorize loop \p L and its
1874   /// epilogue, assuming the main loop is vectorized by \p VF.
1875   bool isCandidateForEpilogueVectorization(const Loop &L,
1876                                            const ElementCount VF) const;
1877 
1878   /// Returns true if epilogue vectorization is considered profitable, and
1879   /// false otherwise.
1880   /// \p VF is the vectorization factor chosen for the original loop.
1881   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1882 
1883 public:
1884   /// The loop that we evaluate.
1885   Loop *TheLoop;
1886 
1887   /// Predicated scalar evolution analysis.
1888   PredicatedScalarEvolution &PSE;
1889 
1890   /// Loop Info analysis.
1891   LoopInfo *LI;
1892 
1893   /// Vectorization legality.
1894   LoopVectorizationLegality *Legal;
1895 
1896   /// Vector target information.
1897   const TargetTransformInfo &TTI;
1898 
1899   /// Target Library Info.
1900   const TargetLibraryInfo *TLI;
1901 
1902   /// Demanded bits analysis.
1903   DemandedBits *DB;
1904 
1905   /// Assumption cache.
1906   AssumptionCache *AC;
1907 
1908   /// Interface to emit optimization remarks.
1909   OptimizationRemarkEmitter *ORE;
1910 
1911   const Function *TheFunction;
1912 
1913   /// Loop Vectorize Hint.
1914   const LoopVectorizeHints *Hints;
1915 
1916   /// The interleave access information contains groups of interleaved accesses
1917   /// with the same stride and close to each other.
1918   InterleavedAccessInfo &InterleaveInfo;
1919 
1920   /// Values to ignore in the cost model.
1921   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1922 
1923   /// Values to ignore in the cost model when VF > 1.
1924   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1925 
1926   /// All element types found in the loop.
1927   SmallPtrSet<Type *, 16> ElementTypesInLoop;
1928 
1929   /// Profitable vector factors.
1930   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1931 };
1932 } // end namespace llvm
1933 
1934 /// Helper struct to manage generating runtime checks for vectorization.
1935 ///
/// The runtime checks are created up-front in temporary blocks to allow better
/// estimation of their cost, and are un-linked from the existing IR. After
/// deciding to vectorize, the checks are moved back. If deciding not to
/// vectorize, the temporary blocks are completely removed.
1940 class GeneratedRTChecks {
1941   /// Basic block which contains the generated SCEV checks, if any.
1942   BasicBlock *SCEVCheckBlock = nullptr;
1943 
1944   /// The value representing the result of the generated SCEV checks. If it is
1945   /// nullptr, either no SCEV checks have been generated or they have been used.
1946   Value *SCEVCheckCond = nullptr;
1947 
1948   /// Basic block which contains the generated memory runtime checks, if any.
1949   BasicBlock *MemCheckBlock = nullptr;
1950 
1951   /// The value representing the result of the generated memory runtime checks.
1952   /// If it is nullptr, either no memory runtime checks have been generated or
1953   /// they have been used.
1954   Value *MemRuntimeCheckCond = nullptr;
1955 
1956   DominatorTree *DT;
1957   LoopInfo *LI;
1958 
1959   SCEVExpander SCEVExp;
1960   SCEVExpander MemCheckExp;
1961 
1962 public:
1963   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1964                     const DataLayout &DL)
1965       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1966         MemCheckExp(SE, DL, "scev.check") {}
1967 
1968   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
  /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation. If
1971   /// there is no vector code generation, the check blocks are removed
1972   /// completely.
1973   void Create(Loop *L, const LoopAccessInfo &LAI,
1974               const SCEVPredicate &Pred) {
1975 
1976     BasicBlock *LoopHeader = L->getHeader();
1977     BasicBlock *Preheader = L->getLoopPreheader();
1978 
1979     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1980     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1981     // may be used by SCEVExpander. The blocks will be un-linked from their
1982     // predecessors and removed from LI & DT at the end of the function.
1983     if (!Pred.isAlwaysTrue()) {
1984       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1985                                   nullptr, "vector.scevcheck");
1986 
1987       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1988           &Pred, SCEVCheckBlock->getTerminator());
1989     }
1990 
1991     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1992     if (RtPtrChecking.Need) {
1993       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1994       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1995                                  "vector.memcheck");
1996 
1997       MemRuntimeCheckCond =
1998           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
1999                            RtPtrChecking.getChecks(), MemCheckExp);
2000       assert(MemRuntimeCheckCond &&
2001              "no RT checks generated although RtPtrChecking "
2002              "claimed checks are required");
2003     }
2004 
2005     if (!MemCheckBlock && !SCEVCheckBlock)
2006       return;
2007 
    // Unhook the temporary blocks with the checks and update various places
    // accordingly.
2010     if (SCEVCheckBlock)
2011       SCEVCheckBlock->replaceAllUsesWith(Preheader);
2012     if (MemCheckBlock)
2013       MemCheckBlock->replaceAllUsesWith(Preheader);
2014 
2015     if (SCEVCheckBlock) {
2016       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2017       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
2018       Preheader->getTerminator()->eraseFromParent();
2019     }
2020     if (MemCheckBlock) {
2021       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2022       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
2023       Preheader->getTerminator()->eraseFromParent();
2024     }
2025 
2026     DT->changeImmediateDominator(LoopHeader, Preheader);
2027     if (MemCheckBlock) {
2028       DT->eraseNode(MemCheckBlock);
2029       LI->removeBlock(MemCheckBlock);
2030     }
2031     if (SCEVCheckBlock) {
2032       DT->eraseNode(SCEVCheckBlock);
2033       LI->removeBlock(SCEVCheckBlock);
2034     }
2035   }
2036 
2037   /// Remove the created SCEV & memory runtime check blocks & instructions, if
2038   /// unused.
2039   ~GeneratedRTChecks() {
2040     SCEVExpanderCleaner SCEVCleaner(SCEVExp);
2041     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp);
2042     if (!SCEVCheckCond)
2043       SCEVCleaner.markResultUsed();
2044 
2045     if (!MemRuntimeCheckCond)
2046       MemCheckCleaner.markResultUsed();
2047 
2048     if (MemRuntimeCheckCond) {
2049       auto &SE = *MemCheckExp.getSE();
2050       // Memory runtime check generation creates compares that use expanded
2051       // values. Remove them before running the SCEVExpanderCleaners.
2052       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2053         if (MemCheckExp.isInsertedInstruction(&I))
2054           continue;
2055         SE.forgetValue(&I);
2056         I.eraseFromParent();
2057       }
2058     }
2059     MemCheckCleaner.cleanup();
2060     SCEVCleaner.cleanup();
2061 
2062     if (SCEVCheckCond)
2063       SCEVCheckBlock->eraseFromParent();
2064     if (MemRuntimeCheckCond)
2065       MemCheckBlock->eraseFromParent();
2066   }
2067 
2068   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2069   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2070   /// depending on the generated condition.
2071   BasicBlock *emitSCEVChecks(BasicBlock *Bypass,
2072                              BasicBlock *LoopVectorPreHeader,
2073                              BasicBlock *LoopExitBlock) {
2074     if (!SCEVCheckCond)
2075       return nullptr;
2076     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2077       if (C->isZero())
2078         return nullptr;
2079 
2080     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2081 
2082     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
    // Add the SCEV check block to the loop containing the vector preheader,
    // if any, to keep LoopInfo up-to-date.
2084     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2085       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2086 
2087     SCEVCheckBlock->getTerminator()->eraseFromParent();
2088     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2089     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2090                                                 SCEVCheckBlock);
2091 
2092     DT->addNewBlock(SCEVCheckBlock, Pred);
2093     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2094 
2095     ReplaceInstWithInst(
2096         SCEVCheckBlock->getTerminator(),
2097         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2098     // Mark the check as used, to prevent it from being removed during cleanup.
2099     SCEVCheckCond = nullptr;
2100     return SCEVCheckBlock;
2101   }
2102 
2103   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2104   /// the branches to branch to the vector preheader or \p Bypass, depending on
2105   /// the generated condition.
2106   BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass,
2107                                    BasicBlock *LoopVectorPreHeader) {
2108     // Check if we generated code that checks in runtime if arrays overlap.
2109     if (!MemRuntimeCheckCond)
2110       return nullptr;
2111 
2112     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2113     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2114                                                 MemCheckBlock);
2115 
2116     DT->addNewBlock(MemCheckBlock, Pred);
2117     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2118     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2119 
2120     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2121       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2122 
2123     ReplaceInstWithInst(
2124         MemCheckBlock->getTerminator(),
2125         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2126     MemCheckBlock->getTerminator()->setDebugLoc(
2127         Pred->getTerminator()->getDebugLoc());
2128 
2129     // Mark the check as used, to prevent it from being removed during cleanup.
2130     MemRuntimeCheckCond = nullptr;
2131     return MemCheckBlock;
2132   }
2133 };
2134 
2135 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
// the vector length information is not provided, vectorization is not
// considered explicit. Interleave hints are not allowed either. These
// limitations will be relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
2142 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2143 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2144 // provides *explicit vectorization hints* (LV can bypass legal checks and
2145 // assume that vectorization is legal). However, both hints are implemented
2146 // using the same metadata (llvm.loop.vectorize, processed by
2147 // LoopVectorizeHints). This will be fixed in the future when the native IR
2148 // representation for pragma 'omp simd' is introduced.
2149 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2150                                    OptimizationRemarkEmitter *ORE) {
2151   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2152   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2153 
2154   // Only outer loops with an explicit vectorization hint are supported.
2155   // Unannotated outer loops are ignored.
2156   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2157     return false;
2158 
2159   Function *Fn = OuterLp->getHeader()->getParent();
2160   if (!Hints.allowVectorization(Fn, OuterLp,
2161                                 true /*VectorizeOnlyWhenForced*/)) {
2162     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2163     return false;
2164   }
2165 
2166   if (Hints.getInterleave() > 1) {
2167     // TODO: Interleave support is future work.
2168     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2169                          "outer loops.\n");
2170     Hints.emitRemarkWithHints();
2171     return false;
2172   }
2173 
2174   return true;
2175 }
2176 
2177 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2178                                   OptimizationRemarkEmitter *ORE,
2179                                   SmallVectorImpl<Loop *> &V) {
2180   // Collect inner loops and outer loops without irreducible control flow. For
2181   // now, only collect outer loops that have explicit vectorization hints. If we
2182   // are stress testing the VPlan H-CFG construction, we collect the outermost
2183   // loop of every loop nest.
2184   if (L.isInnermost() || VPlanBuildStressTest ||
2185       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2186     LoopBlocksRPO RPOT(&L);
2187     RPOT.perform(LI);
2188     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2189       V.push_back(&L);
2190       // TODO: Collect inner loops inside marked outer loops in case
2191       // vectorization fails for the outer loop. Do not invoke
2192       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2193       // already known to be reducible. We can use an inherited attribute for
2194       // that.
2195       return;
2196     }
2197   }
2198   for (Loop *InnerL : L)
2199     collectSupportedLoops(*InnerL, LI, ORE, V);
2200 }
2201 
2202 namespace {
2203 
2204 /// The LoopVectorize Pass.
2205 struct LoopVectorize : public FunctionPass {
2206   /// Pass identification, replacement for typeid
2207   static char ID;
2208 
2209   LoopVectorizePass Impl;
2210 
2211   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2212                          bool VectorizeOnlyWhenForced = false)
2213       : FunctionPass(ID),
2214         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2215     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2216   }
2217 
2218   bool runOnFunction(Function &F) override {
2219     if (skipFunction(F))
2220       return false;
2221 
2222     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2223     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2224     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2225     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2226     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2227     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2228     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2229     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2230     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2231     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2232     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2233     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2234     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2235 
2236     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2237         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2238 
2239     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2240                         GetLAA, *ORE, PSI).MadeAnyChange;
2241   }
2242 
2243   void getAnalysisUsage(AnalysisUsage &AU) const override {
2244     AU.addRequired<AssumptionCacheTracker>();
2245     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2246     AU.addRequired<DominatorTreeWrapperPass>();
2247     AU.addRequired<LoopInfoWrapperPass>();
2248     AU.addRequired<ScalarEvolutionWrapperPass>();
2249     AU.addRequired<TargetTransformInfoWrapperPass>();
2250     AU.addRequired<AAResultsWrapperPass>();
2251     AU.addRequired<LoopAccessLegacyAnalysis>();
2252     AU.addRequired<DemandedBitsWrapperPass>();
2253     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2254     AU.addRequired<InjectTLIMappingsLegacy>();
2255 
2256     // We currently do not preserve loopinfo/dominator analyses with outer loop
2257     // vectorization. Until this is addressed, mark these analyses as preserved
2258     // only for non-VPlan-native path.
2259     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2260     if (!EnableVPlanNativePath) {
2261       AU.addPreserved<LoopInfoWrapperPass>();
2262       AU.addPreserved<DominatorTreeWrapperPass>();
2263     }
2264 
2265     AU.addPreserved<BasicAAWrapperPass>();
2266     AU.addPreserved<GlobalsAAWrapperPass>();
2267     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2268   }
2269 };
2270 
2271 } // end anonymous namespace
2272 
2273 //===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer,
// LoopVectorizationCostModel and LoopVectorizationPlanner.
2276 //===----------------------------------------------------------------------===//
2277 
2278 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2279   // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // inside the vector loop body.
2282   Instruction *Instr = dyn_cast<Instruction>(V);
2283   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2284                      (!Instr ||
2285                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2286   // Place the code for broadcasting invariant variables in the new preheader.
2287   IRBuilder<>::InsertPointGuard Guard(Builder);
2288   if (SafeToHoist)
2289     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2290 
2291   // Broadcast the scalar into all locations in the vector.
2292   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2293 
2294   return Shuf;
2295 }
2296 
2297 /// This function adds
2298 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
/// to each vector element of Val. The sequence starts at \p StartIdx.
/// \p BinOp is relevant for FP induction variables.
2301 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
2302                             Instruction::BinaryOps BinOp, ElementCount VF,
2303                             IRBuilderBase &Builder) {
2304   assert(VF.isVector() && "only vector VFs are supported");
2305 
2306   // Create and check the types.
2307   auto *ValVTy = cast<VectorType>(Val->getType());
2308   ElementCount VLen = ValVTy->getElementCount();
2309 
2310   Type *STy = Val->getType()->getScalarType();
2311   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2312          "Induction Step must be an integer or FP");
2313   assert(Step->getType() == STy && "Step has wrong type");
2314 
2315   SmallVector<Constant *, 8> Indices;
2316 
2317   // Create a vector of consecutive numbers from zero to VF.
2318   VectorType *InitVecValVTy = ValVTy;
2319   if (STy->isFloatingPointTy()) {
2320     Type *InitVecValSTy =
2321         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2322     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2323   }
2324   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2325 
2326   // Splat the StartIdx
2327   Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
2328 
2329   if (STy->isIntegerTy()) {
2330     InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2331     Step = Builder.CreateVectorSplat(VLen, Step);
2332     assert(Step->getType() == Val->getType() && "Invalid step vec");
2333     // FIXME: The newly created binary instructions should contain nsw/nuw
2334     // flags, which can be found from the original scalar operations.
2335     Step = Builder.CreateMul(InitVec, Step);
2336     return Builder.CreateAdd(Val, Step, "induction");
2337   }
2338 
2339   // Floating point induction.
2340   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2341          "Binary Opcode should be specified for FP induction");
2342   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2343   InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
2344 
2345   Step = Builder.CreateVectorSplat(VLen, Step);
2346   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2347   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2348 }
2349 
2350 /// Compute scalar induction steps. \p ScalarIV is the scalar induction
/// variable on which to base the steps, and \p Step is the size of the step.
2352 static void buildScalarSteps(Value *ScalarIV, Value *Step,
2353                              const InductionDescriptor &ID, VPValue *Def,
2354                              VPTransformState &State) {
2355   IRBuilderBase &Builder = State.Builder;
2356   // We shouldn't have to build scalar steps if we aren't vectorizing.
2357   assert(State.VF.isVector() && "VF should be greater than one");
2358   // Get the value type and ensure it and the step have the same integer type.
2359   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2360   assert(ScalarIVTy == Step->getType() &&
2361          "Val and Step should have the same type");
2362 
2363   // We build scalar steps for both integer and floating-point induction
2364   // variables. Here, we determine the kind of arithmetic we will perform.
2365   Instruction::BinaryOps AddOp;
2366   Instruction::BinaryOps MulOp;
2367   if (ScalarIVTy->isIntegerTy()) {
2368     AddOp = Instruction::Add;
2369     MulOp = Instruction::Mul;
2370   } else {
2371     AddOp = ID.getInductionOpcode();
2372     MulOp = Instruction::FMul;
2373   }
2374 
2375   // Determine the number of scalars we need to generate for each unroll
2376   // iteration.
2377   bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def);
2378   unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
2379   // Compute the scalar steps and save the results in State.
2380   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2381                                      ScalarIVTy->getScalarSizeInBits());
2382   Type *VecIVTy = nullptr;
2383   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2384   if (!FirstLaneOnly && State.VF.isScalable()) {
2385     VecIVTy = VectorType::get(ScalarIVTy, State.VF);
2386     UnitStepVec =
2387         Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
2388     SplatStep = Builder.CreateVectorSplat(State.VF, Step);
2389     SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV);
2390   }
2391 
2392   for (unsigned Part = 0; Part < State.UF; ++Part) {
2393     Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);
2394 
2395     if (!FirstLaneOnly && State.VF.isScalable()) {
2396       auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
2397       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2398       if (ScalarIVTy->isFloatingPointTy())
2399         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2400       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2401       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2402       State.set(Def, Add, Part);
      // It's useful to also record the per-lane values for the known minimum
      // number of elements, so we do that below. This improves the code
      // quality when trying to extract the first element, for example.
2406     }
2407 
2408     if (ScalarIVTy->isFloatingPointTy())
2409       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2410 
2411     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2412       Value *StartIdx = Builder.CreateBinOp(
2413           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2414       // The step returned by `createStepForVF` is a runtime-evaluated value
2415       // when VF is scalable. Otherwise, it should be folded into a Constant.
2416       assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
2417              "Expected StartIdx to be folded to a constant when VF is not "
2418              "scalable");
2419       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2420       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2421       State.set(Def, Add, VPIteration(Part, Lane));
2422     }
2423   }
2424 }
2425 
// Generate code for the induction step. Note that induction steps are
// required to be loop-invariant.
2428 static Value *CreateStepValue(const SCEV *Step, ScalarEvolution &SE,
2429                               Instruction *InsertBefore,
2430                               Loop *OrigLoop = nullptr) {
2431   const DataLayout &DL = SE.getDataLayout();
2432   assert((!OrigLoop || SE.isLoopInvariant(Step, OrigLoop)) &&
2433          "Induction step should be loop invariant");
2434   if (auto *E = dyn_cast<SCEVUnknown>(Step))
2435     return E->getValue();
2436 
2437   SCEVExpander Exp(SE, DL, "induction");
2438   return Exp.expandCodeFor(Step, Step->getType(), InsertBefore);
2439 }
2440 
2441 /// Compute the transformed value of Index at offset StartValue using step
2442 /// StepValue.
2443 /// For integer induction, returns StartValue + Index * StepValue.
/// For pointer induction, returns StartValue[Index * StepValue].
/// For FP induction, returns StartValue op (Index * StepValue), where op is
/// the FAdd or FSub opcode of the original induction.
2445 /// FIXME: The newly created binary instructions should contain nsw/nuw
2446 /// flags, which can be found from the original scalar operations.
2447 static Value *emitTransformedIndex(IRBuilderBase &B, Value *Index,
2448                                    Value *StartValue, Value *Step,
2449                                    const InductionDescriptor &ID) {
2450   assert(Index->getType()->getScalarType() == Step->getType() &&
2451          "Index scalar type does not match StepValue type");
2452 
  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and
  // rely on InstCombine for future simplifications. Here we handle some
  // trivial cases only.
2459   auto CreateAdd = [&B](Value *X, Value *Y) {
2460     assert(X->getType() == Y->getType() && "Types don't match!");
2461     if (auto *CX = dyn_cast<ConstantInt>(X))
2462       if (CX->isZero())
2463         return Y;
2464     if (auto *CY = dyn_cast<ConstantInt>(Y))
2465       if (CY->isZero())
2466         return X;
2467     return B.CreateAdd(X, Y);
2468   };
2469 
2470   // We allow X to be a vector type, in which case Y will potentially be
2471   // splatted into a vector with the same element count.
2472   auto CreateMul = [&B](Value *X, Value *Y) {
2473     assert(X->getType()->getScalarType() == Y->getType() &&
2474            "Types don't match!");
2475     if (auto *CX = dyn_cast<ConstantInt>(X))
2476       if (CX->isOne())
2477         return Y;
2478     if (auto *CY = dyn_cast<ConstantInt>(Y))
2479       if (CY->isOne())
2480         return X;
2481     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
2482     if (XVTy && !isa<VectorType>(Y->getType()))
2483       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
2484     return B.CreateMul(X, Y);
2485   };
2486 
2487   switch (ID.getKind()) {
2488   case InductionDescriptor::IK_IntInduction: {
2489     assert(!isa<VectorType>(Index->getType()) &&
2490            "Vector indices not supported for integer inductions yet");
2491     assert(Index->getType() == StartValue->getType() &&
2492            "Index type does not match StartValue type");
2493     if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne())
2494       return B.CreateSub(StartValue, Index);
2495     auto *Offset = CreateMul(Index, Step);
2496     return CreateAdd(StartValue, Offset);
2497   }
2498   case InductionDescriptor::IK_PtrInduction: {
2499     assert(isa<Constant>(Step) &&
2500            "Expected constant step for pointer induction");
2501     return B.CreateGEP(ID.getElementType(), StartValue, CreateMul(Index, Step));
2502   }
2503   case InductionDescriptor::IK_FpInduction: {
2504     assert(!isa<VectorType>(Index->getType()) &&
2505            "Vector indices not supported for FP inductions yet");
2506     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2507     auto InductionBinOp = ID.getInductionBinOp();
2508     assert(InductionBinOp &&
2509            (InductionBinOp->getOpcode() == Instruction::FAdd ||
2510             InductionBinOp->getOpcode() == Instruction::FSub) &&
2511            "Original bin op should be defined for FP induction");
2512 
2513     Value *MulExp = B.CreateFMul(Step, Index);
2514     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2515                          "induction");
2516   }
2517   case InductionDescriptor::IK_NoInduction:
2518     return nullptr;
2519   }
2520   llvm_unreachable("invalid enum");
2521 }
2522 
2523 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2524                                                     const VPIteration &Instance,
2525                                                     VPTransformState &State) {
2526   Value *ScalarInst = State.get(Def, Instance);
2527   Value *VectorValue = State.get(Def, Instance.Part);
2528   VectorValue = Builder.CreateInsertElement(
2529       VectorValue, ScalarInst,
2530       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2531   State.set(Def, VectorValue, Instance.Part);
2532 }
2533 
2534 // Return whether we allow using masked interleave-groups (for dealing with
2535 // strided loads/stores that reside in predicated blocks, or for dealing
2536 // with gaps).
2537 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2538   // If an override option has been passed in for interleaved accesses, use it.
2539   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2540     return EnableMaskedInterleavedMemAccesses;
2541 
2542   return TTI.enableMaskedInterleavedAccessVectorization();
2543 }
2544 
2545 // Try to vectorize the interleave group that \p Instr belongs to.
2546 //
// E.g., translate the following interleaved load group (factor = 3):
2548 //   for (i = 0; i < N; i+=3) {
2549 //     R = Pic[i];             // Member of index 0
2550 //     G = Pic[i+1];           // Member of index 1
2551 //     B = Pic[i+2];           // Member of index 2
2552 //     ... // do something to R, G, B
2553 //   }
2554 // To:
2555 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2556 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2557 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2558 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2559 //
// Or translate the following interleaved store group (factor = 3):
2561 //   for (i = 0; i < N; i+=3) {
2562 //     ... do something to R, G, B
2563 //     Pic[i]   = R;           // Member of index 0
2564 //     Pic[i+1] = G;           // Member of index 1
2565 //     Pic[i+2] = B;           // Member of index 2
2566 //   }
2567 // To:
2568 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2569 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2570 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2571 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2572 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2573 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2574     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2575     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2576     VPValue *BlockInMask) {
2577   Instruction *Instr = Group->getInsertPos();
2578   const DataLayout &DL = Instr->getModule()->getDataLayout();
2579 
  // Prepare the vector type for the interleaved load/store.
2581   Type *ScalarTy = getLoadStoreType(Instr);
2582   unsigned InterleaveFactor = Group->getFactor();
2583   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2584   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2585 
  // Prepare the new pointers.
2587   SmallVector<Value *, 2> AddrParts;
2588   unsigned Index = Group->getIndex(Instr);
2589 
2590   // TODO: extend the masked interleaved-group support to reversed access.
2591   assert((!BlockInMask || !Group->isReverse()) &&
2592          "Reversed masked interleave-group not supported.");
2593 
2594   // If the group is reverse, adjust the index to refer to the last vector lane
2595   // instead of the first. We adjust the index from the first vector lane,
2596   // rather than directly getting the pointer for lane VF - 1, because the
2597   // pointer operand of the interleaved access is supposed to be uniform. For
2598   // uniform instructions, we're only required to generate a value for the
2599   // first vector lane in each unroll iteration.
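  // For example, with VF = 4 and an interleave factor of 3, the index is
  // advanced by (4 - 1) * 3 = 9 elements, so that the lane-0 pointer
  // addresses the group accessed by the last vector lane.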
2600   if (Group->isReverse())
2601     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2602 
2603   for (unsigned Part = 0; Part < UF; Part++) {
2604     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2605     setDebugLocFromInst(AddrPart);
2606 
    // Note that the current instruction could be a member of any index. We
    // need to adjust the address so that it points to the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2618 
2619     bool InBounds = false;
    if (auto *GEP = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
      InBounds = GEP->isInBounds();
2622     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2623     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2624 
2625     // Cast to the vector pointer type.
2626     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2627     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2628     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2629   }
2630 
2631   setDebugLocFromInst(Instr);
2632   Value *PoisonVec = PoisonValue::get(VecTy);
2633 
2634   Value *MaskForGaps = nullptr;
2635   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2636     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2637     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2638   }
2639 
2640   // Vectorize the interleaved load group.
2641   if (isa<LoadInst>(Instr)) {
2642     // For each unroll part, create a wide load for the group.
2643     SmallVector<Value *, 2> NewLoads;
2644     for (unsigned Part = 0; Part < UF; Part++) {
2645       Instruction *NewLoad;
2646       if (BlockInMask || MaskForGaps) {
2647         assert(useMaskedInterleavedAccesses(*TTI) &&
2648                "masked interleaved groups are not allowed.");
2649         Value *GroupMask = MaskForGaps;
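        // For example, with an interleave factor of 3 and VF = 4, a block
        // mask <m0, m1, m2, m3> is replicated below to
        // <m0, m0, m0, m1, m1, m1, m2, m2, m2, m3, m3, m3>, so that all
        // members of a group share their lane's predicate.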
2650         if (BlockInMask) {
2651           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2652           Value *ShuffledMask = Builder.CreateShuffleVector(
2653               BlockInMaskPart,
2654               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2655               "interleaved.mask");
2656           GroupMask = MaskForGaps
2657                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2658                                                 MaskForGaps)
2659                           : ShuffledMask;
2660         }
2661         NewLoad =
2662             Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
2663                                      GroupMask, PoisonVec, "wide.masked.vec");
      } else
2666         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2667                                             Group->getAlign(), "wide.vec");
2668       Group->addMetadata(NewLoad);
2669       NewLoads.push_back(NewLoad);
2670     }
2671 
2672     // For each member in the group, shuffle out the appropriate data from the
2673     // wide loads.
2674     unsigned J = 0;
2675     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2676       Instruction *Member = Group->getMember(I);
2677 
2678       // Skip the gaps in the group.
2679       if (!Member)
2680         continue;
2681 
2682       auto StrideMask =
2683           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2684       for (unsigned Part = 0; Part < UF; Part++) {
2685         Value *StridedVec = Builder.CreateShuffleVector(
2686             NewLoads[Part], StrideMask, "strided.vec");
2687 
        // If this member has a different type, cast the result to its type.
2689         if (Member->getType() != ScalarTy) {
2690           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2691           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2692           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2693         }
2694 
2695         if (Group->isReverse())
2696           StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");
2697 
2698         State.set(VPDefs[J], StridedVec, Part);
2699       }
2700       ++J;
2701     }
2702     return;
2703   }
2704 
  // The sub-vector type for the current instruction.
2706   auto *SubVT = VectorType::get(ScalarTy, VF);
2707 
2708   // Vectorize the interleaved store group.
2709   MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2710   assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
2711          "masked interleaved groups are not allowed.");
2712   assert((!MaskForGaps || !VF.isScalable()) &&
2713          "masking gaps for scalable vectors is not yet supported.");
2714   for (unsigned Part = 0; Part < UF; Part++) {
2715     // Collect the stored vector from each member.
2716     SmallVector<Value *, 4> StoredVecs;
    for (unsigned I = 0; I < InterleaveFactor; I++) {
      assert((Group->getMember(I) || MaskForGaps) &&
             "Failed to get a member from an interleaved store group");
      Instruction *Member = Group->getMember(I);

      // Skip the gaps in the group.
      if (!Member) {
        Value *Poison = PoisonValue::get(SubVT);
        StoredVecs.push_back(Poison);
        continue;
      }

      Value *StoredVec = State.get(StoredValues[I], Part);
2730 
2731       if (Group->isReverse())
2732         StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse");
2733 
      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
2737         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2738 
2739       StoredVecs.push_back(StoredVec);
2740     }
2741 
2742     // Concatenate all vectors into a wide vector.
2743     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2744 
2745     // Interleave the elements in the wide vector.
2746     Value *IVec = Builder.CreateShuffleVector(
2747         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2748         "interleaved.vec");
2749 
2750     Instruction *NewStoreInstr;
2751     if (BlockInMask || MaskForGaps) {
2752       Value *GroupMask = MaskForGaps;
2753       if (BlockInMask) {
2754         Value *BlockInMaskPart = State.get(BlockInMask, Part);
2755         Value *ShuffledMask = Builder.CreateShuffleVector(
2756             BlockInMaskPart,
2757             createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2758             "interleaved.mask");
2759         GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
2760                                                       ShuffledMask, MaskForGaps)
2761                                 : ShuffledMask;
2762       }
2763       NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
2764                                                 Group->getAlign(), GroupMask);
2765     } else
2766       NewStoreInstr =
2767           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2768 
2769     Group->addMetadata(NewStoreInstr);
2770   }
2771 }
2772 
2773 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2774                                                VPReplicateRecipe *RepRecipe,
2775                                                const VPIteration &Instance,
2776                                                bool IfPredicateInstr,
2777                                                VPTransformState &State) {
2778   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2779 
2780   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
2781   // the first lane and part.
2782   if (isa<NoAliasScopeDeclInst>(Instr))
2783     if (!Instance.isFirstIteration())
2784       return;
2785 
  // Does this instruction return a value?
2787   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2788 
2789   Instruction *Cloned = Instr->clone();
2790   if (!IsVoidRetTy)
2791     Cloned->setName(Instr->getName() + ".cloned");
2792 
  // If the scalarized instruction contributes to the address computation of a
  // widened masked load/store which was in a basic block that needed
  // predication and is not predicated after vectorization, we can't propagate
  // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized
  // instruction could feed a poison value to the base address of the widened
  // load/store.
2799   if (State.MayGeneratePoisonRecipes.contains(RepRecipe))
2800     Cloned->dropPoisonGeneratingFlags();
2801 
2802   if (Instr->getDebugLoc())
2803     setDebugLocFromInst(Instr);
2804 
  // Replace the operands of the cloned instruction with their scalar
2806   // equivalents in the new loop.
2807   for (auto &I : enumerate(RepRecipe->operands())) {
2808     auto InputInstance = Instance;
2809     VPValue *Operand = I.value();
2810     VPReplicateRecipe *OperandR = dyn_cast<VPReplicateRecipe>(Operand);
2811     if (OperandR && OperandR->isUniform())
2812       InputInstance.Lane = VPLane::getFirstLane();
2813     Cloned->setOperand(I.index(), State.get(Operand, InputInstance));
2814   }
2815   addNewMetadata(Cloned, Instr);
2816 
2817   // Place the cloned scalar in the new loop.
2818   State.Builder.Insert(Cloned);
2819 
2820   State.set(RepRecipe, Cloned, Instance);
2821 
  // If we just cloned a new assumption, add it to the assumption cache.
2823   if (auto *II = dyn_cast<AssumeInst>(Cloned))
2824     AC->registerAssumption(II);
2825 
2826   // End if-block.
2827   if (IfPredicateInstr)
2828     PredicatedInstructions.push_back(Cloned);
2829 }
2830 
2831 Value *InnerLoopVectorizer::getOrCreateTripCount(BasicBlock *InsertBlock) {
2832   if (TripCount)
2833     return TripCount;
2834 
2835   assert(InsertBlock);
2836   IRBuilder<> Builder(InsertBlock->getTerminator());
2837   // Find the loop boundaries.
2838   ScalarEvolution *SE = PSE.getSE();
2839   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2840   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
2841          "Invalid loop count");
2842 
2843   Type *IdxTy = Legal->getWidestInductionType();
2844   assert(IdxTy && "No type for induction");
2845 
  // The exit count might have type i64 while the phi is i32. This can happen
  // if we have an induction variable that is sign-extended before the
  // compare. The only way we get a backedge-taken count is if the induction
  // variable was signed and as such will not overflow. In such a case,
  // truncation is legal.
2851   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
2852       IdxTy->getPrimitiveSizeInBits())
2853     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2854   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2855 
  // Get the total trip count by adding 1 to the backedge-taken count.
2857   const SCEV *ExitCount = SE->getAddExpr(
2858       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
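  // For example, a loop iterating over i = 0..9 has a backedge-taken count
  // of 9 and a trip count of 10.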
2859 
2860   const DataLayout &DL = InsertBlock->getModule()->getDataLayout();
2861 
2862   // Expand the trip count and place the new instructions in the preheader.
2863   // Notice that the pre-header does not change, only the loop body.
2864   SCEVExpander Exp(*SE, DL, "induction");
2865 
2866   // Count holds the overall loop count (N).
2867   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2868                                 InsertBlock->getTerminator());
2869 
2870   if (TripCount->getType()->isPointerTy())
2871     TripCount =
2872         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2873                                     InsertBlock->getTerminator());
2874 
2875   return TripCount;
2876 }
2877 
2878 Value *
2879 InnerLoopVectorizer::getOrCreateVectorTripCount(BasicBlock *InsertBlock) {
2880   if (VectorTripCount)
2881     return VectorTripCount;
2882 
2883   Value *TC = getOrCreateTripCount(InsertBlock);
2884   IRBuilder<> Builder(InsertBlock->getTerminator());
2885 
2886   Type *Ty = TC->getType();
2887   // This is where we can make the step a runtime constant.
2888   Value *Step = createStepForVF(Builder, Ty, VF, UF);
2889 
2890   // If the tail is to be folded by masking, round the number of iterations N
2891   // up to a multiple of Step instead of rounding down. This is done by first
2892   // adding Step-1 and then rounding down. Note that it's ok if this addition
2893   // overflows: the vector induction variable will eventually wrap to zero given
2894   // that it starts at zero and its Step is a power of two; the loop will then
2895   // exit, with the last early-exit vector comparison also producing all-true.
2896   if (Cost->foldTailByMasking()) {
2897     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
2898            "VF*UF must be a power of 2 when folding tail by masking");
2899     Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF);
2900     TC = Builder.CreateAdd(
2901         TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up");
2902   }
2903 
2904   // Now we need to generate the expression for the part of the loop that the
2905   // vectorized body will execute. This is equal to N - (N % Step) if scalar
2906   // iterations are not required for correctness, or N - Step, otherwise. Step
2907   // is equal to the vectorization factor (number of SIMD elements) times the
2908   // unroll factor (number of SIMD instructions).
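  // For example, with N = 10, VF = 4 and UF = 1, Step is 4 and the vector
  // loop runs 10 - (10 % 4) = 8 iterations' worth of work. With tail folding,
  // N was already rounded up to 13 above, giving 13 - (13 % 4) = 12, so the
  // masked vector loop covers all iterations.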
2909   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2910 
2911   // There are cases where we *must* run at least one iteration in the remainder
2912   // loop.  See the cost model for when this can happen.  If the step evenly
2913   // divides the trip count, we set the remainder to be equal to the step. If
2914   // the step does not evenly divide the trip count, no adjustment is necessary
2915   // since there will already be scalar iterations. Note that the minimum
2916   // iterations check ensures that N >= Step.
2917   if (Cost->requiresScalarEpilogue(VF)) {
2918     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2919     R = Builder.CreateSelect(IsZero, Step, R);
2920   }
2921 
2922   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2923 
2924   return VectorTripCount;
2925 }
2926 
2927 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2928                                                    const DataLayout &DL) {
  // Verify that V is a vector type with the same number of elements as
  // DstVTy.
2930   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
2931   unsigned VF = DstFVTy->getNumElements();
2932   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
2934   Type *SrcElemTy = SrcVecTy->getElementType();
2935   Type *DstElemTy = DstFVTy->getElementType();
2936   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2937          "Vector elements must have same size");
2938 
2939   // Do a direct cast if element types are castable.
2940   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2941     return Builder.CreateBitOrPointerCast(V, DstFVTy);
2942   }
  // V cannot be cast directly to the desired vector type. This may happen
  // when V is a floating-point vector but DstVTy is a vector of pointers, or
  // vice-versa. Handle this with a two-step bitcast through an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
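  // For example, on a target with 64-bit pointers, casting <4 x double> to a
  // vector of four pointers goes through the intermediate type <4 x i64>.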
2947   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2948          "Only one type should be a pointer type");
2949   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2950          "Only one type should be a floating point type");
2951   Type *IntTy =
2952       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2953   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
2954   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2955   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
2956 }
2957 
2958 void InnerLoopVectorizer::emitMinimumIterationCountCheck(BasicBlock *Bypass) {
2959   Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
2960   // Reuse existing vector loop preheader for TC checks.
  // Note that a new preheader block is generated for the vector loop.
2962   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
2963   IRBuilder<> Builder(TCCheckBlock->getTerminator());
2964 
2965   // Generate code to check if the loop's trip count is less than VF * UF, or
2966   // equal to it in case a scalar epilogue is required; this implies that the
2967   // vector trip count is zero. This check also covers the case where adding one
2968   // to the backedge-taken count overflowed leading to an incorrect trip count
2969   // of zero. In this case we will also jump to the scalar loop.
2970   auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
2971                                             : ICmpInst::ICMP_ULT;
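  // For example, with VF = 4 and UF = 2 we bypass to the scalar loop if
  // Count < 8 (or Count <= 8 when a scalar epilogue is required).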
2972 
2973   // If tail is to be folded, vector loop takes care of all iterations.
2974   Value *CheckMinIters = Builder.getFalse();
2975   if (!Cost->foldTailByMasking()) {
2976     Value *Step = createStepForVF(Builder, Count->getType(), VF, UF);
2977     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
2978   }
2979   // Create new preheader for vector loop.
2980   LoopVectorPreHeader =
2981       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
2982                  "vector.ph");
2983 
2984   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
2985                                DT->getNode(Bypass)->getIDom()) &&
2986          "TC check is expected to dominate Bypass");
2987 
2988   // Update dominator for Bypass & LoopExit (if needed).
2989   DT->changeImmediateDominator(Bypass, TCCheckBlock);
2990   if (!Cost->requiresScalarEpilogue(VF))
2991     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
2993     // dominator of the exit blocks.
2994     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
2995 
2996   ReplaceInstWithInst(
2997       TCCheckBlock->getTerminator(),
2998       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
2999   LoopBypassBlocks.push_back(TCCheckBlock);
3000 }
3001 
BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) {
3004   BasicBlock *const SCEVCheckBlock =
3005       RTChecks.emitSCEVChecks(Bypass, LoopVectorPreHeader, LoopExitBlock);
3006   if (!SCEVCheckBlock)
3007     return nullptr;
3008 
3009   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3010            (OptForSizeBasedOnProfile &&
3011             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3012          "Cannot SCEV check stride or overflow when optimizing for size");
3013 
3015   // Update dominator only if this is first RT check.
3016   if (LoopBypassBlocks.empty()) {
3017     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3018     if (!Cost->requiresScalarEpilogue(VF))
3019       // If there is an epilogue which must run, there's no edge from the
      // middle block to exit blocks and thus no need to update the immediate
3021       // dominator of the exit blocks.
3022       DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3023   }
3024 
3025   LoopBypassBlocks.push_back(SCEVCheckBlock);
3026   AddedSafetyChecks = true;
3027   return SCEVCheckBlock;
3028 }
3029 
3030 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(BasicBlock *Bypass) {
3031   // VPlan-native path does not do any analysis for runtime checks currently.
3032   if (EnableVPlanNativePath)
3033     return nullptr;
3034 
3035   BasicBlock *const MemCheckBlock =
3036       RTChecks.emitMemRuntimeChecks(Bypass, LoopVectorPreHeader);
3037 
  // Check if we generated code that checks at runtime whether arrays overlap.
  // We put the checks into a separate block to make the more common case of
  // few elements faster.
3041   if (!MemCheckBlock)
3042     return nullptr;
3043 
3044   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3045     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3046            "Cannot emit memory checks when optimizing for size, unless forced "
3047            "to vectorize.");
3048     ORE->emit([&]() {
3049       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3050                                         OrigLoop->getStartLoc(),
3051                                         OrigLoop->getHeader())
3052              << "Code-size may be reduced by not forcing "
3053                 "vectorization, or by source-code modifications "
3054                 "eliminating the need for runtime checks "
3055                 "(e.g., adding 'restrict').";
3056     });
3057   }
3058 
3059   LoopBypassBlocks.push_back(MemCheckBlock);
3060 
3061   AddedSafetyChecks = true;
3062 
3063   // We currently don't use LoopVersioning for the actual loop cloning but we
3064   // still use it to add the noalias metadata.
3065   LVer = std::make_unique<LoopVersioning>(
3066       *Legal->getLAI(),
3067       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3068       DT, PSE.getSE());
3069   LVer->prepareNoAliasMetadata();
3070   return MemCheckBlock;
3071 }
3072 
3073 void InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3074   LoopScalarBody = OrigLoop->getHeader();
3075   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3076   assert(LoopVectorPreHeader && "Invalid loop structure");
3077   LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
3078   assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
3079          "multiple exit loop without required epilogue?");
3080 
3081   LoopMiddleBlock =
3082       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3083                  LI, nullptr, Twine(Prefix) + "middle.block");
3084   LoopScalarPreHeader =
3085       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3086                  nullptr, Twine(Prefix) + "scalar.ph");
3087 
3088   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3089 
3090   // Set up the middle block terminator.  Two cases:
3091   // 1) If we know that we must execute the scalar epilogue, emit an
3092   //    unconditional branch.
  // 2) Otherwise, we must have a single unique exit block (due to how we
  //    implement the multiple exit case).  In this case, set up a conditional
  //    branch from the middle block to the loop scalar preheader, and the
  //    exit block.  completeLoopSkeleton will update the condition to use an
  //    iteration check, if required to decide whether to execute the remainder.
3098   BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3099     BranchInst::Create(LoopScalarPreHeader) :
3100     BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
3101                        Builder.getTrue());
3102   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3103   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3104 
  // Update dominator for loop exit. During skeleton creation, only the vector
  // pre-header and the middle block are created. The vector loop is entirely
  // created during VPlan execution.
3108   if (!Cost->requiresScalarEpilogue(VF))
3109     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
3111     // dominator of the exit blocks.
3112     DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3113 }
3114 
3115 void InnerLoopVectorizer::createInductionResumeValues(
3116     std::pair<BasicBlock *, Value *> AdditionalBypass) {
3117   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3118           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3119          "Inconsistent information about additional bypass.");
3120 
3121   Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader);
3122   assert(VectorTripCount && "Expected valid arguments");
3123   // We are going to resume the execution of the scalar loop.
3124   // Go over all of the induction variables that we found and fix the
3125   // PHIs that are left in the scalar version of the loop.
3126   // The starting values of PHI nodes depend on the counter of the last
3127   // iteration in the vectorized loop.
3128   // If we come from a bypass edge then we need to start from the original
3129   // start value.
3130   Instruction *OldInduction = Legal->getPrimaryInduction();
3131   for (auto &InductionEntry : Legal->getInductionVars()) {
3132     PHINode *OrigPhi = InductionEntry.first;
3133     InductionDescriptor II = InductionEntry.second;
3134 
    // Create phi nodes to merge from the backedge-taken check block.
3136     PHINode *BCResumeVal =
3137         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3138                         LoopScalarPreHeader->getTerminator());
3139     // Copy original phi DL over to the new one.
3140     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3141     Value *&EndValue = IVEndValues[OrigPhi];
3142     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3143     if (OrigPhi == OldInduction) {
3144       // We know what the end value is.
3145       EndValue = VectorTripCount;
3146     } else {
3147       IRBuilder<> B(LoopVectorPreHeader->getTerminator());
3148 
3149       // Fast-math-flags propagate from the original induction instruction.
3150       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3151         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3152 
3153       Type *StepType = II.getStep()->getType();
3154       Instruction::CastOps CastOp =
3155           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3156       Value *VTC = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.vtc");
3157       Value *Step =
3158           CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
3159       EndValue = emitTransformedIndex(B, VTC, II.getStartValue(), Step, II);
3160       EndValue->setName("ind.end");
3161 
3162       // Compute the end value for the additional bypass (if applicable).
3163       if (AdditionalBypass.first) {
3164         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3165         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3166                                          StepType, true);
3167         Value *Step =
3168             CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint());
3169         VTC =
3170             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.vtc");
3171         EndValueFromAdditionalBypass =
3172             emitTransformedIndex(B, VTC, II.getStartValue(), Step, II);
3173         EndValueFromAdditionalBypass->setName("ind.end");
3174       }
3175     }
3176     // The new PHI merges the original incoming value, in case of a bypass,
3177     // or the value at the end of the vectorized loop.
3178     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3179 
3180     // Fix the scalar body counter (PHI node).
3181     // The old induction's phi node in the scalar body needs the truncated
3182     // value.
3183     for (BasicBlock *BB : LoopBypassBlocks)
3184       BCResumeVal->addIncoming(II.getStartValue(), BB);
3185 
3186     if (AdditionalBypass.first)
3187       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3188                                             EndValueFromAdditionalBypass);
3189 
3190     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3191   }
3192 }
3193 
3194 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(MDNode *OrigLoopID) {
3195   // The trip counts should be cached by now.
3196   Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
3197   Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader);
3198 
3199   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3200 
3201   // Add a check in the middle block to see if we have completed
3202   // all of the iterations in the first vector loop.  Three cases:
3203   // 1) If we require a scalar epilogue, there is no conditional branch as
3204   //    we unconditionally branch to the scalar preheader.  Do nothing.
3205   // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
3206   //    Thus if tail is to be folded, we know we don't need to run the
3207   //    remainder and we can use the previous value for the condition (true).
3208   // 3) Otherwise, construct a runtime check.
3209   if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
3210     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3211                                         Count, VectorTripCount, "cmp.n",
3212                                         LoopMiddleBlock->getTerminator());
3213 
    // Here we use the same DebugLoc as the scalar loop latch terminator instead
    // of the corresponding compare because they may have ended up with
    // different line numbers and we want to avoid awkward line stepping while
    // debugging. E.g., if the compare has a line number inside the loop.
3218     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3219     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3220   }
3221 
3222 #ifdef EXPENSIVE_CHECKS
3223   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3224 #endif
3225 
3226   return LoopVectorPreHeader;
3227 }
3228 
3229 std::pair<BasicBlock *, Value *>
3230 InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3231   /*
3232    In this function we generate a new loop. The new loop will contain
3233    the vectorized instructions while the old loop will continue to run the
3234    scalar remainder.
3235 
3236        [ ] <-- loop iteration number check.
3237     /   |
3238    /    v
3239   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3240   |  /  |
3241   | /   v
3242   ||   [ ]     <-- vector pre header.
3243   |/    |
3244   |     v
3245   |    [  ] \
3246   |    [  ]_|   <-- vector loop (created during VPlan execution).
3247   |     |
3248   |     v
3249   \   -[ ]   <--- middle-block.
3250    \/   |
3251    /\   v
3252    | ->[ ]     <--- new preheader.
3253    |    |
3254  (opt)  v      <-- edge from middle to exit iff epilogue is not required.
3255    |   [ ] \
3256    |   [ ]_|   <-- old scalar loop to handle remainder (scalar epilogue).
3257     \   |
3258      \  v
3259       >[ ]     <-- exit block(s).
3260    ...
3261    */
3262 
3263   // Get the metadata of the original loop before it gets modified.
3264   MDNode *OrigLoopID = OrigLoop->getLoopID();
3265 
3266   // Workaround!  Compute the trip count of the original loop and cache it
3267   // before we start modifying the CFG.  This code has a systemic problem
3268   // wherein it tries to run analysis over partially constructed IR; this is
3269   // wrong, and not simply for SCEV.  The trip count of the original loop
3270   // simply happens to be prone to hitting this in practice.  In theory, we
3271   // can hit the same issue for any SCEV, or ValueTracking query done during
3272   // mutation.  See PR49900.
3273   getOrCreateTripCount(OrigLoop->getLoopPreheader());
3274 
3275   // Create an empty vector loop, and prepare basic blocks for the runtime
3276   // checks.
3277   createVectorLoopSkeleton("");
3278 
3279   // Now, compare the new count to zero. If it is zero skip the vector loop and
3280   // jump to the scalar loop. This check also covers the case where the
3281   // backedge-taken count is uint##_max: adding one to it will overflow leading
3282   // to an incorrect trip count of zero. In this (rare) case we will also jump
3283   // to the scalar loop.
3284   emitMinimumIterationCountCheck(LoopScalarPreHeader);
3285 
3286   // Generate the code to check any assumptions that we've made for SCEV
3287   // expressions.
3288   emitSCEVChecks(LoopScalarPreHeader);
3289 
  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
3293   emitMemRuntimeChecks(LoopScalarPreHeader);
3294 
3295   // Emit phis for the new starting index of the scalar loop.
3296   createInductionResumeValues();
3297 
3298   return {completeLoopSkeleton(OrigLoopID), nullptr};
3299 }
3300 
3301 // Fix up external users of the induction variable. At this point, we are
3302 // in LCSSA form, with all external PHIs that use the IV having one input value,
3303 // coming from the remainder loop. We need those PHIs to also have a correct
3304 // value for the IV when arriving directly from the middle block.
3305 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3306                                        const InductionDescriptor &II,
3307                                        Value *VectorTripCount, Value *EndValue,
3308                                        BasicBlock *MiddleBlock,
3309                                        BasicBlock *VectorHeader) {
3310   // There are two kinds of external IV usages - those that use the value
3311   // computed in the last iteration (the PHI) and those that use the penultimate
3312   // value (the value that feeds into the phi from the loop latch).
  // We allow both, but they obviously have different values.
3314 
3315   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3316 
3317   DenseMap<Value *, Value *> MissingVals;
3318 
3319   // An external user of the last iteration's value should see the value that
3320   // the remainder loop uses to initialize its own IV.
3321   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3322   for (User *U : PostInc->users()) {
3323     Instruction *UI = cast<Instruction>(U);
3324     if (!OrigLoop->contains(UI)) {
3325       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3326       MissingVals[UI] = EndValue;
3327     }
3328   }
3329 
  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
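  // For example, an integer IV with start 0 and unit step, after a vector
  // trip count of 8, has the penultimate value 0 + 1 * (8 - 1) = 7.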
3333   for (User *U : OrigPhi->users()) {
3334     auto *UI = cast<Instruction>(U);
3335     if (!OrigLoop->contains(UI)) {
3336       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3337 
3338       IRBuilder<> B(MiddleBlock->getTerminator());
3339 
3340       // Fast-math-flags propagate from the original induction instruction.
3341       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3342         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3343 
3344       Value *CountMinusOne = B.CreateSub(
3345           VectorTripCount, ConstantInt::get(VectorTripCount->getType(), 1));
3346       Value *CMO =
3347           !II.getStep()->getType()->isIntegerTy()
3348               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3349                              II.getStep()->getType())
3350               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3351       CMO->setName("cast.cmo");
3352 
3353       Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(),
3354                                     VectorHeader->getTerminator());
3355       Value *Escape =
3356           emitTransformedIndex(B, CMO, II.getStartValue(), Step, II);
3357       Escape->setName("ind.escape");
3358       MissingVals[UI] = Escape;
3359     }
3360   }
3361 
3362   for (auto &I : MissingVals) {
3363     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3365     // that is %IV2 = phi [...], [ %IV1, %latch ]
3366     // In this case, if IV1 has an external use, we need to avoid adding both
3367     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3368     // don't already have an incoming value for the middle block.
3369     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3370       PHI->addIncoming(I.second, MiddleBlock);
3371   }
3372 }
3373 
3374 namespace {
3375 
3376 struct CSEDenseMapInfo {
3377   static bool canHandle(const Instruction *I) {
3378     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3379            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3380   }
3381 
3382   static inline Instruction *getEmptyKey() {
3383     return DenseMapInfo<Instruction *>::getEmptyKey();
3384   }
3385 
3386   static inline Instruction *getTombstoneKey() {
3387     return DenseMapInfo<Instruction *>::getTombstoneKey();
3388   }
3389 
3390   static unsigned getHashValue(const Instruction *I) {
3391     assert(canHandle(I) && "Unknown instruction!");
3392     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3393                                                            I->value_op_end()));
3394   }
3395 
3396   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3397     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3398         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3399       return LHS == RHS;
3400     return LHS->isIdenticalTo(RHS);
3401   }
3402 };
3403 
3404 } // end anonymous namespace
3405 
/// Perform CSE of induction variable instructions.
3407 static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3409   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3410   for (Instruction &In : llvm::make_early_inc_range(*BB)) {
3411     if (!CSEDenseMapInfo::canHandle(&In))
3412       continue;
3413 
3414     // Check if we can replace this instruction with any of the
3415     // visited instructions.
3416     if (Instruction *V = CSEMap.lookup(&In)) {
3417       In.replaceAllUsesWith(V);
3418       In.eraseFromParent();
3419       continue;
3420     }
3421 
3422     CSEMap[&In] = &In;
3423   }
3424 }
3425 
3426 InstructionCost
3427 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3428                                               bool &NeedToScalarize) const {
3429   Function *F = CI->getCalledFunction();
3430   Type *ScalarRetTy = CI->getType();
3431   SmallVector<Type *, 4> Tys, ScalarTys;
3432   for (auto &ArgOp : CI->args())
3433     ScalarTys.push_back(ArgOp->getType());
3434 
3435   // Estimate cost of scalarized vector call. The source operands are assumed
3436   // to be vectors, so we need to extract individual elements from there,
3437   // execute VF scalar calls, and then gather the result into the vector return
3438   // value.
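  // For example, with VF = 4 this accounts for extracting four sets of
  // arguments, issuing four scalar calls, and inserting the four results into
  // the vector return value: 4 * ScalarCallCost + ScalarizationCost.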
3439   InstructionCost ScalarCallCost =
3440       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3441   if (VF.isScalar())
3442     return ScalarCallCost;
3443 
3444   // Compute corresponding vector type for return value and arguments.
3445   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3446   for (Type *ScalarTy : ScalarTys)
3447     Tys.push_back(ToVectorTy(ScalarTy, VF));
3448 
3449   // Compute costs of unpacking argument values for the scalar calls and
3450   // packing the return values to a vector.
3451   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3452 
3453   InstructionCost Cost =
3454       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3455 
3456   // If we can't emit a vector call for this function, then the currently found
3457   // cost is the cost we need to return.
3458   NeedToScalarize = true;
3459   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3460   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3461 
3462   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3463     return Cost;
3464 
3465   // If the corresponding vector cost is cheaper, return its cost.
3466   InstructionCost VectorCallCost =
3467       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3468   if (VectorCallCost < Cost) {
3469     NeedToScalarize = false;
3470     Cost = VectorCallCost;
3471   }
3472   return Cost;
3473 }
3474 
3475 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3476   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3477     return Elt;
3478   return VectorType::get(Elt, VF);
3479 }
3480 
3481 InstructionCost
3482 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3483                                                    ElementCount VF) const {
3484   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3485   assert(ID && "Expected intrinsic call!");
3486   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3487   FastMathFlags FMF;
3488   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3489     FMF = FPMO->getFastMathFlags();
3490 
3491   SmallVector<const Value *> Arguments(CI->args());
3492   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3493   SmallVector<Type *> ParamTys;
3494   std::transform(FTy->param_begin(), FTy->param_end(),
3495                  std::back_inserter(ParamTys),
3496                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3497 
3498   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3499                                     dyn_cast<IntrinsicInst>(CI));
3500   return TTI.getIntrinsicInstrCost(CostAttrs,
3501                                    TargetTransformInfo::TCK_RecipThroughput);
3502 }
3503 
3504 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3505   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3506   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3507   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3508 }
3509 
3510 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3511   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3512   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3513   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3514 }
3515 
3516 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
  // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I` and re-extend its result. InstCombine runs
  // later and will remove any ext/trunc pairs.
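  // For example, if the cost model proved an i32 add needs only 8 bits, its
  // operands are truncated to <VF x i8>, the add is performed on <VF x i8>,
  // and the result is zero-extended back to <VF x i32>.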
3520   SmallPtrSet<Value *, 4> Erased;
3521   for (const auto &KV : Cost->getMinimalBitwidths()) {
3522     // If the value wasn't vectorized, we must maintain the original scalar
3523     // type. The absence of the value from State indicates that it
3524     // wasn't vectorized.
3525     // FIXME: Should not rely on getVPValue at this point.
3526     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3527     if (!State.hasAnyVectorValue(Def))
3528       continue;
3529     for (unsigned Part = 0; Part < UF; ++Part) {
3530       Value *I = State.get(Def, Part);
3531       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3532         continue;
3533       Type *OriginalTy = I->getType();
3534       Type *ScalarTruncatedTy =
3535           IntegerType::get(OriginalTy->getContext(), KV.second);
3536       auto *TruncatedTy = VectorType::get(
3537           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
3538       if (TruncatedTy == OriginalTy)
3539         continue;
3540 
3541       IRBuilder<> B(cast<Instruction>(I));
3542       auto ShrinkOperand = [&](Value *V) -> Value * {
3543         if (auto *ZI = dyn_cast<ZExtInst>(V))
3544           if (ZI->getSrcTy() == TruncatedTy)
3545             return ZI->getOperand(0);
3546         return B.CreateZExtOrTrunc(V, TruncatedTy);
3547       };
3548 
3549       // The actual instruction modification depends on the instruction type,
3550       // unfortunately.
3551       Value *NewI = nullptr;
3552       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3553         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3554                              ShrinkOperand(BO->getOperand(1)));
3555 
3556         // Any wrapping introduced by shrinking this operation shouldn't be
3557         // considered undefined behavior. So, we can't unconditionally copy
3558         // arithmetic wrapping flags to NewI.
3559         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3560       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3561         NewI =
3562             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3563                          ShrinkOperand(CI->getOperand(1)));
3564       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3565         NewI = B.CreateSelect(SI->getCondition(),
3566                               ShrinkOperand(SI->getTrueValue()),
3567                               ShrinkOperand(SI->getFalseValue()));
3568       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3569         switch (CI->getOpcode()) {
3570         default:
3571           llvm_unreachable("Unhandled cast!");
3572         case Instruction::Trunc:
3573           NewI = ShrinkOperand(CI->getOperand(0));
3574           break;
3575         case Instruction::SExt:
3576           NewI = B.CreateSExtOrTrunc(
3577               CI->getOperand(0),
3578               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3579           break;
3580         case Instruction::ZExt:
3581           NewI = B.CreateZExtOrTrunc(
3582               CI->getOperand(0),
3583               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3584           break;
3585         }
3586       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3587         auto Elements0 =
3588             cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
3589         auto *O0 = B.CreateZExtOrTrunc(
3590             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3591         auto Elements1 =
3592             cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
3593         auto *O1 = B.CreateZExtOrTrunc(
3594             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3595 
3596         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3597       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3598         // Don't do anything with the operands, just extend the result.
3599         continue;
3600       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3601         auto Elements =
3602             cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
3603         auto *O0 = B.CreateZExtOrTrunc(
3604             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3605         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3606         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3607       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3608         auto Elements =
3609             cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
3610         auto *O0 = B.CreateZExtOrTrunc(
3611             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3612         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3613       } else {
3614         // If we don't know what to do, be conservative and don't do anything.
3615         continue;
3616       }
3617 
3618       // Lastly, extend the result.
3619       NewI->takeName(cast<Instruction>(I));
3620       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3621       I->replaceAllUsesWith(Res);
3622       cast<Instruction>(I)->eraseFromParent();
3623       Erased.insert(I);
3624       State.reset(Def, Res, Part);
3625     }
3626   }
3627 
  // We'll have created a bunch of ZExts that are now dead. Clean them up.
3629   for (const auto &KV : Cost->getMinimalBitwidths()) {
3630     // If the value wasn't vectorized, we must maintain the original scalar
3631     // type. The absence of the value from State indicates that it
3632     // wasn't vectorized.
3633     // FIXME: Should not rely on getVPValue at this point.
3634     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3635     if (!State.hasAnyVectorValue(Def))
3636       continue;
3637     for (unsigned Part = 0; Part < UF; ++Part) {
3638       Value *I = State.get(Def, Part);
3639       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3640       if (Inst && Inst->use_empty()) {
3641         Value *NewI = Inst->getOperand(0);
3642         Inst->eraseFromParent();
3643         State.reset(Def, NewI, Part);
3644       }
3645     }
3646   }
3647 }
3648 
3649 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State,
3650                                             VPlan &Plan) {
3651   // Insert truncates and extends for any truncated instructions as hints to
3652   // InstCombine.
3653   if (VF.isVector())
3654     truncateToMinimalBitwidths(State);
3655 
3656   // Fix widened non-induction PHIs by setting up the PHI operands.
3657   if (OrigPHIsToFix.size()) {
3658     assert(EnableVPlanNativePath &&
3659            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3660     fixNonInductionPHIs(State);
3661   }
3662 
3663   // At this point every instruction in the original loop is widened to a
3664   // vector form. Now we need to fix the recurrences in the loop. These PHI
3665   // nodes are currently empty because we did not want to introduce cycles.
3666   // This is the second stage of vectorizing recurrences.
3667   fixCrossIterationPHIs(State);
3668 
3669   // Forget the original basic block.
3670   PSE.getSE()->forgetLoop(OrigLoop);
3671 
3672   VPBasicBlock *LatchVPBB = Plan.getVectorLoopRegion()->getExitBasicBlock();
3673   Loop *VectorLoop = LI->getLoopFor(State.CFG.VPBB2IRBB[LatchVPBB]);
3674   // If we inserted an edge from the middle block to the unique exit block,
3675   // update uses outside the loop (phis) to account for the newly inserted
3676   // edge.
3677   if (!Cost->requiresScalarEpilogue(VF)) {
3678     // Fix-up external users of the induction variables.
3679     for (auto &Entry : Legal->getInductionVars())
3680       fixupIVUsers(Entry.first, Entry.second,
3681                    getOrCreateVectorTripCount(VectorLoop->getLoopPreheader()),
3682                    IVEndValues[Entry.first], LoopMiddleBlock,
3683                    VectorLoop->getHeader());
3684 
3685     fixLCSSAPHIs(State);
3686   }
3687 
3688   for (Instruction *PI : PredicatedInstructions)
3689     sinkScalarOperands(&*PI);
3690 
3691   // Remove redundant induction instructions.
3692   cse(VectorLoop->getHeader());
3693 
3694   // Set/update profile weights for the vector and remainder loops as original
3695   // loop iterations are now distributed among them. Note that original loop
3696   // represented by LoopScalarBody becomes remainder loop after vectorization.
3697   //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with slightly less precise counts, but that should be OK since the
  // profile is not inherently precise anyway. Note also that any bypass of the
  // vector code due to legality checks is ignored, optimistically assigning
  // all the weight to the vector loop.
  //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
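  //
  // For example (illustrative numbers): with VF = 4 and UF = 2, the weights
  // are scaled as if the loop were unrolled by a factor of 8, so an original
  // estimated trip count of ~100 yields ~12 iterations for the vector loop,
  // with the remaining iterations attributed to the scalar remainder loop.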
3707   setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody), VectorLoop,
3708                                LI->getLoopFor(LoopScalarBody),
3709                                VF.getKnownMinValue() * UF);
3710 }
3711 
3712 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
3713   // In order to support recurrences we need to be able to vectorize Phi nodes.
3714   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3715   // stage #2: We now need to fix the recurrences by adding incoming edges to
3716   // the currently empty PHI nodes. At this point every instruction in the
3717   // original loop is widened to a vector form so we can use them to construct
3718   // the incoming edges.
3719   VPBasicBlock *Header =
3720       State.Plan->getVectorLoopRegion()->getEntryBasicBlock();
3721   for (VPRecipeBase &R : Header->phis()) {
3722     if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
3723       fixReduction(ReductionPhi, State);
3724     else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
3725       fixFirstOrderRecurrence(FOR, State);
3726   }
3727 }
3728 
3729 void InnerLoopVectorizer::fixFirstOrderRecurrence(
3730     VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) {
3731   // This is the second phase of vectorizing first-order recurrences. An
3732   // overview of the transformation is described below. Suppose we have the
3733   // following loop.
3734   //
3735   //   for (int i = 0; i < n; ++i)
3736   //     b[i] = a[i] - a[i - 1];
3737   //
3738   // There is a first-order recurrence on "a". For this loop, the shorthand
3739   // scalar IR looks like:
3740   //
3741   //   scalar.ph:
3742   //     s_init = a[-1]
3743   //     br scalar.body
3744   //
3745   //   scalar.body:
3746   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3747   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3748   //     s2 = a[i]
3749   //     b[i] = s2 - s1
3750   //     br cond, scalar.body, ...
3751   //
  // In this example, s1 is a recurrence because its value depends on the
3753   // previous iteration. In the first phase of vectorization, we created a
3754   // vector phi v1 for s1. We now complete the vectorization and produce the
3755   // shorthand vector IR shown below (for VF = 4, UF = 1).
3756   //
3757   //   vector.ph:
3758   //     v_init = vector(..., ..., ..., a[-1])
3759   //     br vector.body
3760   //
3761   //   vector.body
3762   //     i = phi [0, vector.ph], [i+4, vector.body]
3763   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3764   //     v2 = a[i, i+1, i+2, i+3];
3765   //     v3 = vector(v1(3), v2(0, 1, 2))
3766   //     b[i, i+1, i+2, i+3] = v2 - v3
3767   //     br cond, vector.body, middle.block
3768   //
3769   //   middle.block:
3770   //     x = v2(3)
3771   //     br scalar.ph
3772   //
3773   //   scalar.ph:
3774   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3775   //     br scalar.body
3776   //
  // After the vector loop finishes executing, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.
3779 
3780   // Extract the last vector element in the middle block. This will be the
3781   // initial value for the recurrence when jumping to the scalar loop.
3782   VPValue *PreviousDef = PhiR->getBackedgeValue();
3783   Value *Incoming = State.get(PreviousDef, UF - 1);
3784   auto *ExtractForScalar = Incoming;
3785   auto *IdxTy = Builder.getInt32Ty();
3786   if (VF.isVector()) {
3787     auto *One = ConstantInt::get(IdxTy, 1);
3788     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3789     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
3790     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
3791     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
3792                                                     "vector.recur.extract");
3793   }
  // Extract the second-to-last element in the middle block if the
3795   // Phi is used outside the loop. We need to extract the phi itself
3796   // and not the last element (the phi update in the current iteration). This
3797   // will be the value when jumping to the exit block from the LoopMiddleBlock,
3798   // when the scalar loop is not run at all.
3799   Value *ExtractForPhiUsedOutsideLoop = nullptr;
3800   if (VF.isVector()) {
3801     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
3802     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
3803     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3804         Incoming, Idx, "vector.recur.extract.for.phi");
3805   } else if (UF > 1)
    // When the loop is unrolled without being vectorized, initialize
    // ExtractForPhiUsedOutsideLoop with the second-to-last unrolled part of
    // `Incoming`. This is analogous to the vectorized case above: extracting
    // the second-to-last element when VF > 1.
3810     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
3811 
3812   // Fix the initial value of the original recurrence in the scalar loop.
3813   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3814   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
3815   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3816   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
3817   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3818     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3819     Start->addIncoming(Incoming, BB);
3820   }
3821 
3822   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3823   Phi->setName("scalar.recur");
3824 
3825   // Finally, fix users of the recurrence outside the loop. The users will need
3826   // either the last value of the scalar recurrence or the last value of the
3827   // vector recurrence we extracted in the middle block. Since the loop is in
3828   // LCSSA form, we just need to find all the phi nodes for the original scalar
3829   // recurrence in the exit block, and then add an edge for the middle block.
3830   // Note that LCSSA does not imply single entry when the original scalar loop
3831   // had multiple exiting edges (as we always run the last iteration in the
  // scalar epilogue); in that case, there is no edge from the middle block to
  // the exit block, and thus no phis that need updating.
3834   if (!Cost->requiresScalarEpilogue(VF))
3835     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
3836       if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
3837         LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3838 }
3839 
3840 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
3841                                        VPTransformState &State) {
3842   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
3844   assert(Legal->isReductionVariable(OrigPhi) &&
3845          "Unable to find the reduction variable");
3846   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
3847 
3848   RecurKind RK = RdxDesc.getRecurrenceKind();
3849   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3850   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3851   setDebugLocFromInst(ReductionStartValue);
3852 
3853   VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
3854   // This is the vector-clone of the value that leaves the loop.
3855   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
3856 
  // Wrap flags are in general invalid after vectorization; clear them.
3858   clearReductionWrapFlags(RdxDesc, State);
3859 
3860   // Before each round, move the insertion point right between
3861   // the PHIs and the values we are going to write.
3862   // This allows us to write both PHINodes and the extractelement
3863   // instructions.
3864   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3865 
3866   setDebugLocFromInst(LoopExitInst);
3867 
3868   Type *PhiTy = OrigPhi->getType();
3869 
3870   VPBasicBlock *LatchVPBB =
3871       PhiR->getParent()->getEnclosingLoopRegion()->getExitBasicBlock();
3872   BasicBlock *VectorLoopLatch = State.CFG.VPBB2IRBB[LatchVPBB];
3873   // If tail is folded by masking, the vector value to leave the loop should be
3874   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
3875   // instead of the former. For an inloop reduction the reduction will already
3876   // be predicated, and does not need to be handled here.
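  // For example (an illustrative sketch), the select chosen here may look
  // like:
  //
  //   %sel = select <4 x i1> %mask, <4 x i32> %rdx.next, <4 x i32> %vec.phi
  //
  // so that masked-off lanes keep the value of the reduction phi.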
3877   if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
3878     for (unsigned Part = 0; Part < UF; ++Part) {
3879       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
3880       Value *Sel = nullptr;
3881       for (User *U : VecLoopExitInst->users()) {
3882         if (isa<SelectInst>(U)) {
3883           assert(!Sel && "Reduction exit feeding two selects");
3884           Sel = U;
3885         } else
3886           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
3887       }
3888       assert(Sel && "Reduction exit feeds no select");
3889       State.reset(LoopExitInstDef, Sel, Part);
3890 
3891       // If the target can create a predicated operator for the reduction at no
3892       // extra cost in the loop (for example a predicated vadd), it can be
3893       // cheaper for the select to remain in the loop than be sunk out of it,
3894       // and so use the select value for the phi instead of the old
3895       // LoopExitValue.
3896       if (PreferPredicatedReductionSelect ||
3897           TTI->preferPredicatedReductionSelect(
3898               RdxDesc.getOpcode(), PhiTy,
3899               TargetTransformInfo::ReductionFlags())) {
        auto *VecRdxPhi = cast<PHINode>(State.get(PhiR, Part));
3902         VecRdxPhi->setIncomingValueForBlock(VectorLoopLatch, Sel);
3903       }
3904     }
3905   }
3906 
3907   // If the vector reduction can be performed in a smaller type, we truncate
3908   // then extend the loop exit value to enable InstCombine to evaluate the
3909   // entire expression in the smaller type.
3910   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
3911     assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
3912     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3913     Builder.SetInsertPoint(VectorLoopLatch->getTerminator());
3914     VectorParts RdxParts(UF);
3915     for (unsigned Part = 0; Part < UF; ++Part) {
3916       RdxParts[Part] = State.get(LoopExitInstDef, Part);
3917       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3918       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3919                                         : Builder.CreateZExt(Trunc, VecTy);
3920       for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
3921         if (U != Trunc) {
3922           U->replaceUsesOfWith(RdxParts[Part], Extnd);
3923           RdxParts[Part] = Extnd;
3924         }
3925     }
3926     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3927     for (unsigned Part = 0; Part < UF; ++Part) {
3928       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3929       State.reset(LoopExitInstDef, RdxParts[Part], Part);
3930     }
3931   }
3932 
3933   // Reduce all of the unrolled parts into a single vector.
3934   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
3935   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
3936 
3937   // The middle block terminator has already been assigned a DebugLoc here (the
3938   // OrigLoop's single latch terminator). We want the whole middle block to
3939   // appear to execute on this line because: (a) it is all compiler generated,
3940   // (b) these instructions are always executed after evaluating the latch
3941   // conditional branch, and (c) other passes may add new predecessors which
3942   // terminate on this line. This is the easiest way to ensure we don't
3943   // accidentally cause an extra step back into the loop while debugging.
3944   setDebugLocFromInst(LoopMiddleBlock->getTerminator());
3945   if (PhiR->isOrdered())
3946     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
3947   else {
3948     // Floating-point operations should have some FMF to enable the reduction.
3949     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
3950     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
3951     for (unsigned Part = 1; Part < UF; ++Part) {
3952       Value *RdxPart = State.get(LoopExitInstDef, Part);
3953       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
3954         ReducedPartRdx = Builder.CreateBinOp(
3955             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
3956       } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
3957         ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
3958                                            ReducedPartRdx, RdxPart);
3959       else
3960         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
3961     }
3962   }
3963 
  // Create the reduction after the loop. Note that inloop reductions create
  // the target reduction in the loop using a Reduction recipe.
3966   if (VF.isVector() && !PhiR->isInLoop()) {
3967     ReducedPartRdx =
3968         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
3969     // If the reduction can be performed in a smaller type, we need to extend
3970     // the reduction to the wider type before we branch to the original loop.
3971     if (PhiTy != RdxDesc.getRecurrenceType())
3972       ReducedPartRdx = RdxDesc.isSigned()
3973                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
3974                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
3975   }
3976 
3977   PHINode *ResumePhi =
3978       dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue());
3979 
3980   // Create a phi node that merges control-flow from the backedge-taken check
3981   // block and the middle block.
3982   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
3983                                         LoopScalarPreHeader->getTerminator());
3984 
3985   // If we are fixing reductions in the epilogue loop then we should already
3986   // have created a bc.merge.rdx Phi after the main vector body. Ensure that
3987   // we carry over the incoming values correctly.
3988   for (auto *Incoming : predecessors(LoopScalarPreHeader)) {
3989     if (Incoming == LoopMiddleBlock)
3990       BCBlockPhi->addIncoming(ReducedPartRdx, Incoming);
3991     else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming))
3992       BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming),
3993                               Incoming);
3994     else
3995       BCBlockPhi->addIncoming(ReductionStartValue, Incoming);
3996   }
3997 
3998   // Set the resume value for this reduction
3999   ReductionResumeValues.insert({&RdxDesc, BCBlockPhi});
4000 
4001   // If there were stores of the reduction value to a uniform memory address
4002   // inside the loop, create the final store here.
4003   if (StoreInst *SI = RdxDesc.IntermediateStore) {
4004     StoreInst *NewSI =
4005         Builder.CreateStore(ReducedPartRdx, SI->getPointerOperand());
4006     propagateMetadata(NewSI, SI);
4007 
4008     // If the reduction value is used in other places,
4009     // then let the code below create PHI's for that.
4010   }
4011 
4012   // Now, we need to fix the users of the reduction variable
4013   // inside and outside of the scalar remainder loop.
4014 
4015   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4016   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4018   if (!Cost->requiresScalarEpilogue(VF))
4019     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4020       if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
4021         LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4022 
4023   // Fix the scalar loop reduction variable with the incoming reduction sum
4024   // from the vector body and from the backedge value.
4025   int IncomingEdgeBlockIdx =
4026       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4027   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4028   // Pick the other block.
4029   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4030   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4031   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4032 }
4033 
void InnerLoopVectorizer::clearReductionWrapFlags(
    const RecurrenceDescriptor &RdxDesc, VPTransformState &State) {
4036   RecurKind RK = RdxDesc.getRecurrenceKind();
4037   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4038     return;
4039 
4040   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4041   assert(LoopExitInstr && "null loop exit instruction");
4042   SmallVector<Instruction *, 8> Worklist;
4043   SmallPtrSet<Instruction *, 8> Visited;
4044   Worklist.push_back(LoopExitInstr);
4045   Visited.insert(LoopExitInstr);
4046 
4047   while (!Worklist.empty()) {
4048     Instruction *Cur = Worklist.pop_back_val();
4049     if (isa<OverflowingBinaryOperator>(Cur))
4050       for (unsigned Part = 0; Part < UF; ++Part) {
4051         // FIXME: Should not rely on getVPValue at this point.
4052         Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
4053         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4054       }
4055 
4056     for (User *U : Cur->users()) {
4057       Instruction *UI = cast<Instruction>(U);
4058       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4059           Visited.insert(UI).second)
4060         Worklist.push_back(UI);
4061     }
4062   }
4063 }
4064 
4065 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4066   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4067     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4068       // Some phis were already hand updated by the reduction and recurrence
4069       // code above, leave them alone.
4070       continue;
4071 
    // Non-instruction incoming values will have only one value.
    auto *IncomingValue = LCSSAPhi.getIncomingValue(0);

4075     VPLane Lane = VPLane::getFirstLane();
4076     if (isa<Instruction>(IncomingValue) &&
4077         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4078                                            VF))
4079       Lane = VPLane::getLastLaneForVF(VF);
4080 
4081     // Can be a loop invariant incoming value or the last scalar value to be
4082     // extracted from the vectorized loop.
4083     // FIXME: Should not rely on getVPValue at this point.
4084     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4085     Value *lastIncomingValue =
4086         OrigLoop->isLoopInvariant(IncomingValue)
4087             ? IncomingValue
4088             : State.get(State.Plan->getVPValue(IncomingValue, true),
4089                         VPIteration(UF - 1, Lane));
4090     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4091   }
4092 }
4093 
4094 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4095   // The basic block and loop containing the predicated instruction.
4096   auto *PredBB = PredInst->getParent();
4097   auto *VectorLoop = LI->getLoopFor(PredBB);
4098 
4099   // Initialize a worklist with the operands of the predicated instruction.
4100   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4101 
4102   // Holds instructions that we need to analyze again. An instruction may be
4103   // reanalyzed if we don't yet know if we can sink it or not.
4104   SmallVector<Instruction *, 8> InstsToReanalyze;
4105 
4106   // Returns true if a given use occurs in the predicated block. Phi nodes use
4107   // their operands in their corresponding predecessor blocks.
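  // For example (illustrative): given %p = phi i32 [ %v, %pred.bb ], ...,
  // the use of %v is treated as occurring in %pred.bb rather than in the
  // block containing the phi.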
4108   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4109     auto *I = cast<Instruction>(U.getUser());
4110     BasicBlock *BB = I->getParent();
4111     if (auto *Phi = dyn_cast<PHINode>(I))
4112       BB = Phi->getIncomingBlock(
4113           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4114     return BB == PredBB;
4115   };
4116 
  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when a full
  // pass through the worklist fails to sink a single instruction.
4121   bool Changed;
4122   do {
4123     // Add the instructions that need to be reanalyzed to the worklist, and
4124     // reset the changed indicator.
4125     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4126     InstsToReanalyze.clear();
4127     Changed = false;
4128 
4129     while (!Worklist.empty()) {
4130       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4131 
4132       // We can't sink an instruction if it is a phi node, is not in the loop,
4133       // or may have side effects.
4134       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4135           I->mayHaveSideEffects())
4136         continue;
4137 
4138       // If the instruction is already in PredBB, check if we can sink its
4139       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4140       // sinking the scalar instruction I, hence it appears in PredBB; but it
4141       // may have failed to sink I's operands (recursively), which we try
4142       // (again) here.
4143       if (I->getParent() == PredBB) {
4144         Worklist.insert(I->op_begin(), I->op_end());
4145         continue;
4146       }
4147 
4148       // It's legal to sink the instruction if all its uses occur in the
4149       // predicated block. Otherwise, there's nothing to do yet, and we may
4150       // need to reanalyze the instruction.
4151       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4152         InstsToReanalyze.push_back(I);
4153         continue;
4154       }
4155 
      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4158       I->moveBefore(&*PredBB->getFirstInsertionPt());
4159       Worklist.insert(I->op_begin(), I->op_end());
4160 
4161       // The sinking may have enabled other instructions to be sunk, so we will
4162       // need to iterate.
4163       Changed = true;
4164     }
4165   } while (Changed);
4166 }
4167 
4168 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4169   for (PHINode *OrigPhi : OrigPHIsToFix) {
4170     VPWidenPHIRecipe *VPPhi =
4171         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4172     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4173     // Make sure the builder has a valid insert point.
4174     Builder.SetInsertPoint(NewPhi);
4175     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4176       VPValue *Inc = VPPhi->getIncomingValue(i);
4177       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4178       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4179     }
4180   }
4181 }
4182 
4183 bool InnerLoopVectorizer::useOrderedReductions(
4184     const RecurrenceDescriptor &RdxDesc) {
4185   return Cost->useOrderedReductions(RdxDesc);
4186 }
4187 
4188 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4189                                               VPWidenPHIRecipe *PhiR,
4190                                               VPTransformState &State) {
4191   assert(EnableVPlanNativePath &&
4192          "Non-native vplans are not expected to have VPWidenPHIRecipes.");
4193   // Currently we enter here in the VPlan-native path for non-induction
4194   // PHIs where all control flow is uniform. We simply widen these PHIs.
4195   // Create a vector phi with no operands - the vector phi operands will be
4196   // set at the end of vector code generation.
4197   Type *VecTy = (State.VF.isScalar())
4198                     ? PN->getType()
4199                     : VectorType::get(PN->getType(), State.VF);
4200   Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4201   State.set(PhiR, VecPhi, 0);
4202   OrigPHIsToFix.push_back(cast<PHINode>(PN));
4203 }
4204 
4205 /// A helper function for checking whether an integer division-related
4206 /// instruction may divide by zero (in which case it must be predicated if
4207 /// executed conditionally in the scalar code).
4208 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing
/// the division, but can do so without predication.
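/// For example (illustrative): 'udiv i32 %x, 7' cannot divide by zero and
/// needs no predication, whereas 'udiv i32 %x, %y' may divide by zero and
/// 'udiv i32 %x, 0' certainly does, so both of the latter return true here.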
4212 static bool mayDivideByZero(Instruction &I) {
4213   assert((I.getOpcode() == Instruction::UDiv ||
4214           I.getOpcode() == Instruction::SDiv ||
4215           I.getOpcode() == Instruction::URem ||
4216           I.getOpcode() == Instruction::SRem) &&
4217          "Unexpected instruction");
4218   Value *Divisor = I.getOperand(1);
4219   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4220   return !CInt || CInt->isZero();
4221 }
4222 
4223 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4224                                                VPUser &ArgOperands,
4225                                                VPTransformState &State) {
4226   assert(!isa<DbgInfoIntrinsic>(I) &&
4227          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4228   setDebugLocFromInst(&I);
4229 
4230   Module *M = I.getParent()->getParent()->getParent();
4231   auto *CI = cast<CallInst>(&I);
4232 
4233   SmallVector<Type *, 4> Tys;
4234   for (Value *ArgOperand : CI->args())
4235     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4236 
4237   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4238 
  // Decide whether to use a vector intrinsic or a plain function call for the
  // vectorized version of the instruction: is the intrinsic call more
  // beneficial than the library call?
4242   bool NeedToScalarize = false;
4243   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4244   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4245   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4246   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4247          "Instruction should be scalarized elsewhere.");
4248   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
4249          "Either the intrinsic cost or vector call cost must be valid");
4250 
4251   for (unsigned Part = 0; Part < UF; ++Part) {
4252     SmallVector<Type *, 2> TysForDecl = {CI->getType()};
4253     SmallVector<Value *, 4> Args;
4254     for (auto &I : enumerate(ArgOperands.operands())) {
4255       // Some intrinsics have a scalar argument - don't replace it with a
4256       // vector.
4257       Value *Arg;
4258       if (!UseVectorIntrinsic ||
4259           !isVectorIntrinsicWithScalarOpAtArg(ID, I.index()))
4260         Arg = State.get(I.value(), Part);
4261       else
4262         Arg = State.get(I.value(), VPIteration(0, 0));
4263       if (isVectorIntrinsicWithOverloadTypeAtArg(ID, I.index()))
4264         TysForDecl.push_back(Arg->getType());
4265       Args.push_back(Arg);
4266     }
4267 
4268     Function *VectorF;
4269     if (UseVectorIntrinsic) {
4270       // Use vector version of the intrinsic.
4271       if (VF.isVector())
4272         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4273       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4274       assert(VectorF && "Can't retrieve vector intrinsic.");
4275     } else {
4276       // Use vector version of the function call.
4277       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4278 #ifndef NDEBUG
4279       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4280              "Can't create vector function.");
4281 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
4293   }
4294 }
4295 
4296 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
4297   // We should not collect Scalars more than once per VF. Right now, this
4298   // function is called from collectUniformsAndScalars(), which already does
4299   // this check. Collecting Scalars for VF=1 does not make any sense.
4300   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
4301          "This function should not be visited twice for the same VF");
4302 
4303   // This avoids any chances of creating a REPLICATE recipe during planning
4304   // since that would result in generation of scalarized code during execution,
4305   // which is not supported for scalable vectors.
4306   if (VF.isScalable()) {
4307     Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end());
4308     return;
4309   }
4310 
4311   SmallSetVector<Instruction *, 8> Worklist;
4312 
4313   // These sets are used to seed the analysis with pointers used by memory
4314   // accesses that will remain scalar.
4315   SmallSetVector<Instruction *, 8> ScalarPtrs;
4316   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4317   auto *Latch = TheLoop->getLoopLatch();
4318 
4319   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4320   // The pointer operands of loads and stores will be scalar as long as the
4321   // memory access is not a gather or scatter operation. The value operand of a
4322   // store will remain scalar if the store is scalarized.
4323   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4324     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4325     assert(WideningDecision != CM_Unknown &&
4326            "Widening decision should be ready at this moment");
4327     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4328       if (Ptr == Store->getValueOperand())
4329         return WideningDecision == CM_Scalarize;
4330     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value nor a pointer operand");
4332     return WideningDecision != CM_GatherScatter;
4333   };
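  // For example (illustrative): the pointer operand of a consecutive, widened
  // store is a scalar use, since only the lane-0 address is needed to form
  // the wide pointer, whereas the pointer operand of a scatter is not.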
4334 
4335   // A helper that returns true if the given value is a bitcast or
4336   // getelementptr instruction contained in the loop.
4337   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4338     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4339             isa<GetElementPtrInst>(V)) &&
4340            !TheLoop->isLoopInvariant(V);
4341   };
4342 
4343   // A helper that evaluates a memory access's use of a pointer. If the use will
4344   // be a scalar use and the pointer is only used by memory accesses, we place
4345   // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4346   // PossibleNonScalarPtrs.
4347   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4348     // We only care about bitcast and getelementptr instructions contained in
4349     // the loop.
4350     if (!isLoopVaryingBitCastOrGEP(Ptr))
4351       return;
4352 
4353     // If the pointer has already been identified as scalar (e.g., if it was
4354     // also identified as uniform), there's nothing to do.
4355     auto *I = cast<Instruction>(Ptr);
4356     if (Worklist.count(I))
4357       return;
4358 
4359     // If the use of the pointer will be a scalar use, and all users of the
4360     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4361     // place the pointer in PossibleNonScalarPtrs.
4362     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4363           return isa<LoadInst>(U) || isa<StoreInst>(U);
4364         }))
4365       ScalarPtrs.insert(I);
4366     else
4367       PossibleNonScalarPtrs.insert(I);
4368   };
4369 
  // We seed the scalars analysis with three classes of instructions: (1)
  // instructions marked uniform-after-vectorization, (2) bitcast,
  // getelementptr and (pointer) phi instructions used by memory accesses
  // requiring a scalar use, and (3) instructions forced to remain scalar
  // (see ForcedScalars below).
4374   //
4375   // (1) Add to the worklist all instructions that have been identified as
4376   // uniform-after-vectorization.
4377   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4378 
4379   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4380   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
4382   // scatter operation. The value operand of a store will remain scalar if the
4383   // store is scalarized.
4384   for (auto *BB : TheLoop->blocks())
4385     for (auto &I : *BB) {
4386       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4387         evaluatePtrUse(Load, Load->getPointerOperand());
4388       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4389         evaluatePtrUse(Store, Store->getPointerOperand());
4390         evaluatePtrUse(Store, Store->getValueOperand());
4391       }
4392     }
4393   for (auto *I : ScalarPtrs)
4394     if (!PossibleNonScalarPtrs.count(I)) {
4395       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4396       Worklist.insert(I);
4397     }
4398 
4399   // Insert the forced scalars.
4400   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4401   // induction variable when the PHI user is scalarized.
4402   auto ForcedScalar = ForcedScalars.find(VF);
4403   if (ForcedScalar != ForcedScalars.end())
4404     for (auto *I : ForcedScalar->second)
4405       Worklist.insert(I);
4406 
4407   // Expand the worklist by looking through any bitcasts and getelementptr
4408   // instructions we've already identified as scalar. This is similar to the
4409   // expansion step in collectLoopUniforms(); however, here we're only
4410   // expanding to include additional bitcasts and getelementptr instructions.
4411   unsigned Idx = 0;
4412   while (Idx != Worklist.size()) {
4413     Instruction *Dst = Worklist[Idx++];
4414     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4415       continue;
4416     auto *Src = cast<Instruction>(Dst->getOperand(0));
4417     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4418           auto *J = cast<Instruction>(U);
4419           return !TheLoop->contains(J) || Worklist.count(J) ||
4420                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4421                   isScalarUse(J, Src));
4422         })) {
4423       Worklist.insert(Src);
4424       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4425     }
4426   }
4427 
4428   // An induction variable will remain scalar if all users of the induction
4429   // variable and induction variable update remain scalar.
4430   for (auto &Induction : Legal->getInductionVars()) {
4431     auto *Ind = Induction.first;
4432     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4433 
4434     // If tail-folding is applied, the primary induction variable will be used
4435     // to feed a vector compare.
4436     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4437       continue;
4438 
4439     // Returns true if \p Indvar is a pointer induction that is used directly by
4440     // load/store instruction \p I.
4441     auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
4442                                               Instruction *I) {
4443       return Induction.second.getKind() ==
4444                  InductionDescriptor::IK_PtrInduction &&
4445              (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
4446              Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
4447     };
4448 
4449     // Determine if all users of the induction variable are scalar after
4450     // vectorization.
4451     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4452       auto *I = cast<Instruction>(U);
4453       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4454              IsDirectLoadStoreFromPtrIndvar(Ind, I);
4455     });
4456     if (!ScalarInd)
4457       continue;
4458 
4459     // Determine if all users of the induction variable update instruction are
4460     // scalar after vectorization.
4461     auto ScalarIndUpdate =
4462         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4463           auto *I = cast<Instruction>(U);
4464           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4465                  IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
4466         });
4467     if (!ScalarIndUpdate)
4468       continue;
4469 
4470     // The induction variable and its update instruction will remain scalar.
4471     Worklist.insert(Ind);
4472     Worklist.insert(IndUpdate);
4473     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4474     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4475                       << "\n");
4476   }
4477 
4478   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4479 }
4480 
4481 bool LoopVectorizationCostModel::isScalarWithPredication(
4482     Instruction *I, ElementCount VF) const {
4483   if (!blockNeedsPredicationForAnyReason(I->getParent()))
4484     return false;
  switch (I->getOpcode()) {
4486   default:
4487     break;
4488   case Instruction::Load:
4489   case Instruction::Store: {
4490     if (!Legal->isMaskRequired(I))
4491       return false;
4492     auto *Ptr = getLoadStorePointerOperand(I);
4493     auto *Ty = getLoadStoreType(I);
4494     Type *VTy = Ty;
4495     if (VF.isVector())
4496       VTy = VectorType::get(Ty, VF);
4497     const Align Alignment = getLoadStoreAlignment(I);
4498     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4499                                 TTI.isLegalMaskedGather(VTy, Alignment))
4500                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4501                                 TTI.isLegalMaskedScatter(VTy, Alignment));
4502   }
4503   case Instruction::UDiv:
4504   case Instruction::SDiv:
4505   case Instruction::SRem:
4506   case Instruction::URem:
4507     return mayDivideByZero(*I);
4508   }
4509   return false;
4510 }
4511 
4512 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
4513     Instruction *I, ElementCount VF) {
4514   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4515   assert(getWideningDecision(I, VF) == CM_Unknown &&
4516          "Decision should not be set yet.");
4517   auto *Group = getInterleavedAccessGroup(I);
4518   assert(Group && "Must have a group.");
4519 
  // If the instruction's allocated size doesn't equal its type size, it
4521   // requires padding and will be scalarized.
4522   auto &DL = I->getModule()->getDataLayout();
4523   auto *ScalarTy = getLoadStoreType(I);
4524   if (hasIrregularType(ScalarTy, DL))
4525     return false;
4526 
4527   // If the group involves a non-integral pointer, we may not be able to
4528   // losslessly cast all values to a common type.
4529   unsigned InterleaveFactor = Group->getFactor();
4530   bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy);
4531   for (unsigned i = 0; i < InterleaveFactor; i++) {
4532     Instruction *Member = Group->getMember(i);
4533     if (!Member)
4534       continue;
4535     auto *MemberTy = getLoadStoreType(Member);
4536     bool MemberNI = DL.isNonIntegralPointerType(MemberTy);
4537     // Don't coerce non-integral pointers to integers or vice versa.
4538     if (MemberNI != ScalarNI) {
4539       // TODO: Consider adding special nullptr value case here
4540       return false;
4541     } else if (MemberNI && ScalarNI &&
4542                ScalarTy->getPointerAddressSpace() !=
4543                MemberTy->getPointerAddressSpace()) {
4544       return false;
4545     }
4546   }
4547 
4548   // Check if masking is required.
4549   // A Group may need masking for one of two reasons: it resides in a block that
4550   // needs predication, or it was decided to use masking to deal with gaps
4551   // (either a gap at the end of a load-access that may result in a speculative
4552   // load, or any gaps in a store-access).
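  // For example (illustrative): a store group with factor 3 where members 0
  // and 2 are present but member 1 is missing has a gap, so the wide store
  // must be masked to avoid writing the missing member's lane.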
4553   bool PredicatedAccessRequiresMasking =
4554       blockNeedsPredicationForAnyReason(I->getParent()) &&
4555       Legal->isMaskRequired(I);
4556   bool LoadAccessWithGapsRequiresEpilogMasking =
4557       isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
4558       !isScalarEpilogueAllowed();
4559   bool StoreAccessWithGapsRequiresMasking =
4560       isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
4561   if (!PredicatedAccessRequiresMasking &&
4562       !LoadAccessWithGapsRequiresEpilogMasking &&
4563       !StoreAccessWithGapsRequiresMasking)
4564     return true;
4565 
4566   // If masked interleaving is required, we expect that the user/target had
4567   // enabled it, because otherwise it either wouldn't have been created or
4568   // it should have been invalidated by the CostModel.
4569   assert(useMaskedInterleavedAccesses(TTI) &&
4570          "Masked interleave-groups for predicated accesses are not enabled.");
4571 
4572   if (Group->isReverse())
4573     return false;
4574 
4575   auto *Ty = getLoadStoreType(I);
4576   const Align Alignment = getLoadStoreAlignment(I);
4577   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4578                           : TTI.isLegalMaskedStore(Ty, Alignment);
4579 }
4580 
4581 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
4582     Instruction *I, ElementCount VF) {
4583   // Get and ensure we have a valid memory instruction.
4584   assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
4585 
4586   auto *Ptr = getLoadStorePointerOperand(I);
4587   auto *ScalarTy = getLoadStoreType(I);
4588 
4589   // In order to be widened, the pointer should be consecutive, first of all.
4590   if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
4591     return false;
4592 
4593   // If the instruction is a store located in a predicated block, it will be
4594   // scalarized.
4595   if (isScalarWithPredication(I, VF))
4596     return false;
4597 
  // If the instruction's allocated size doesn't equal its type size, it
4599   // requires padding and will be scalarized.
4600   auto &DL = I->getModule()->getDataLayout();
4601   if (hasIrregularType(ScalarTy, DL))
4602     return false;
4603 
4604   return true;
4605 }
4606 
4607 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
4608   // We should not collect Uniforms more than once per VF. Right now,
4609   // this function is called from collectUniformsAndScalars(), which
4610   // already does this check. Collecting Uniforms for VF=1 does not make any
4611   // sense.
4612 
4613   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
4614          "This function should not be visited twice for the same VF");
4615 
  // Visit the list of Uniforms. If we find no uniform value, we won't
  // analyze it again; Uniforms.count(VF) will still return 1.
4618   Uniforms[VF].clear();
4619 
4620   // We now know that the loop is vectorizable!
4621   // Collect instructions inside the loop that will remain uniform after
4622   // vectorization.
4623 
4624   // Global values, params and instructions outside of current loop are out of
4625   // scope.
4626   auto isOutOfScope = [&](Value *V) -> bool {
4627     Instruction *I = dyn_cast<Instruction>(V);
4628     return (!I || !TheLoop->contains(I));
4629   };
4630 
4631   // Worklist containing uniform instructions demanding lane 0.
4632   SetVector<Instruction *> Worklist;
4633   BasicBlock *Latch = TheLoop->getLoopLatch();
4634 
4635   // Add uniform instructions demanding lane 0 to the worklist. Instructions
4636   // that are scalar with predication must not be considered uniform after
4637   // vectorization, because that would create an erroneous replicating region
4638   // where only a single instance out of VF should be formed.
  // TODO: Optimize such rare cases, if they turn out to be important; see
  // PR40816.
4640   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
4641     if (isOutOfScope(I)) {
4642       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
4643                         << *I << "\n");
4644       return;
4645     }
4646     if (isScalarWithPredication(I, VF)) {
4647       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
4648                         << *I << "\n");
4649       return;
4650     }
4651     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
4652     Worklist.insert(I);
4653   };
4654 
4655   // Start with the conditional branch. If the branch condition is an
4656   // instruction contained in the loop that is only used by the branch, it is
4657   // uniform.
4658   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4659   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
4660     addToWorklistIfAllowed(Cmp);
4661 
4662   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
4663     InstWidening WideningDecision = getWideningDecision(I, VF);
4664     assert(WideningDecision != CM_Unknown &&
4665            "Widening decision should be ready at this moment");
4666 
4667     // A uniform memory op is itself uniform.  We exclude uniform stores
4668     // here as they demand the last lane, not the first one.
4669     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
4670       assert(WideningDecision == CM_Scalarize);
4671       return true;
4672     }
4673 
4674     return (WideningDecision == CM_Widen ||
4675             WideningDecision == CM_Widen_Reverse ||
4676             WideningDecision == CM_Interleave);
4677   };
4678 
4680   // Returns true if Ptr is the pointer operand of a memory access instruction
4681   // I, and I is known to not require scalarization.
4682   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
4683     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
4684   };
4685 
4686   // Holds a list of values which are known to have at least one uniform use.
4687   // Note that there may be other uses which aren't uniform.  A "uniform use"
4688   // here is something which only demands lane 0 of the unrolled iterations;
  // it does not imply that all lanes produce the same value (i.e., this is
  // not the usual meaning of uniform).
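  // For example (illustrative): a getelementptr feeding only the pointer
  // operand of a consecutive, widened load has a uniform use, because only
  // its lane-0 value is needed to form the wide pointer.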
4691   SetVector<Value *> HasUniformUse;
4692 
4693   // Scan the loop for instructions which are either a) known to have only
4694   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
4695   for (auto *BB : TheLoop->blocks())
4696     for (auto &I : *BB) {
4697       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
4698         switch (II->getIntrinsicID()) {
4699         case Intrinsic::sideeffect:
4700         case Intrinsic::experimental_noalias_scope_decl:
4701         case Intrinsic::assume:
4702         case Intrinsic::lifetime_start:
4703         case Intrinsic::lifetime_end:
4704           if (TheLoop->hasLoopInvariantOperands(&I))
4705             addToWorklistIfAllowed(&I);
4706           break;
4707         default:
4708           break;
4709         }
4710       }
4711 
4712       // ExtractValue instructions must be uniform, because the operands are
4713       // known to be loop-invariant.
4714       if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
4715         assert(isOutOfScope(EVI->getAggregateOperand()) &&
4716                "Expected aggregate value to be loop invariant");
4717         addToWorklistIfAllowed(EVI);
4718         continue;
4719       }
4720 
4721       // If there's no pointer operand, there's nothing to do.
4722       auto *Ptr = getLoadStorePointerOperand(&I);
4723       if (!Ptr)
4724         continue;
4725 
4726       // A uniform memory op is itself uniform.  We exclude uniform stores
4727       // here as they demand the last lane, not the first one.
4728       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
4729         addToWorklistIfAllowed(&I);
4730 
4731       if (isUniformDecision(&I, VF)) {
4732         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
4733         HasUniformUse.insert(Ptr);
4734       }
4735     }
4736 
4737   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
4738   // demanding) users.  Since loops are assumed to be in LCSSA form, this
4739   // disallows uses outside the loop as well.
4740   for (auto *V : HasUniformUse) {
4741     if (isOutOfScope(V))
4742       continue;
4743     auto *I = cast<Instruction>(V);
4744     auto UsersAreMemAccesses =
4745       llvm::all_of(I->users(), [&](User *U) -> bool {
4746         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
4747       });
4748     if (UsersAreMemAccesses)
4749       addToWorklistIfAllowed(I);
4750   }
4751 
  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // that a uniform instruction will only be used by uniform instructions.
4755   unsigned idx = 0;
4756   while (idx != Worklist.size()) {
4757     Instruction *I = Worklist[idx++];
4758 
4759     for (auto OV : I->operand_values()) {
4760       // isOutOfScope operands cannot be uniform instructions.
4761       if (isOutOfScope(OV))
4762         continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
4765       auto *OP = dyn_cast<PHINode>(OV);
4766       if (OP && Legal->isFirstOrderRecurrence(OP))
4767         continue;
4768       // If all the users of the operand are uniform, then add the
4769       // operand into the uniform worklist.
4770       auto *OI = cast<Instruction>(OV);
4771       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
4772             auto *J = cast<Instruction>(U);
4773             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
4774           }))
4775         addToWorklistIfAllowed(OI);
4776     }
4777   }
4778 
4779   // For an instruction to be added into Worklist above, all its users inside
4780   // the loop should also be in Worklist. However, this condition cannot be
4781   // true for phi nodes that form a cyclic dependence. We must process phi
4782   // nodes separately. An induction variable will remain uniform if all users
4783   // of the induction variable and induction variable update remain uniform.
4784   // The code below handles both pointer and non-pointer induction variables.
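  // As an illustrative sketch, for the canonical induction
  //   %iv      = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
  //   %iv.next = add nuw i64 %iv, 1
  // %iv remains uniform if, apart from %iv.next, it is only used by
  // instructions already known to be uniform (e.g. the scalar address of a
  // widened consecutive access), and symmetrically for %iv.next (e.g. the
  // latch compare).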
4785   for (auto &Induction : Legal->getInductionVars()) {
4786     auto *Ind = Induction.first;
4787     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4788 
4789     // Determine if all users of the induction variable are uniform after
4790     // vectorization.
4791     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4792       auto *I = cast<Instruction>(U);
4793       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4794              isVectorizedMemAccessUse(I, Ind);
4795     });
4796     if (!UniformInd)
4797       continue;
4798 
4799     // Determine if all users of the induction variable update instruction are
4800     // uniform after vectorization.
4801     auto UniformIndUpdate =
4802         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4803           auto *I = cast<Instruction>(U);
4804           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4805                  isVectorizedMemAccessUse(I, IndUpdate);
4806         });
4807     if (!UniformIndUpdate)
4808       continue;
4809 
4810     // The induction variable and its update instruction will remain uniform.
4811     addToWorklistIfAllowed(Ind);
4812     addToWorklistIfAllowed(IndUpdate);
4813   }
4814 
4815   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
4816 }
4817 
4818 bool LoopVectorizationCostModel::runtimeChecksRequired() {
4819   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
4820 
4821   if (Legal->getRuntimePointerChecking()->Need) {
4822     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
4823         "runtime pointer checks needed. Enable vectorization of this "
4824         "loop with '#pragma clang loop vectorize(enable)' when "
4825         "compiling with -Os/-Oz",
4826         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4827     return true;
4828   }
4829 
4830   if (!PSE.getPredicate().isAlwaysTrue()) {
4831     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
4832         "runtime SCEV checks needed. Enable vectorization of this "
4833         "loop with '#pragma clang loop vectorize(enable)' when "
4834         "compiling with -Os/-Oz",
4835         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4836     return true;
4837   }
4838 
4839   // FIXME: Avoid specializing for stride==1 instead of bailing out.
4840   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
    reportVectorizationFailure("Runtime stride check is required with -Os/-Oz",
        "runtime stride == 1 checks needed, and cannot be emitted when "
        "compiling with -Os/-Oz",
4844         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4845     return true;
4846   }
4847 
4848   return false;
4849 }
4850 
4851 ElementCount
4852 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
4853   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
4854     return ElementCount::getScalable(0);
4855 
4856   if (Hints->isScalableVectorizationDisabled()) {
4857     reportVectorizationInfo("Scalable vectorization is explicitly disabled",
4858                             "ScalableVectorizationDisabled", ORE, TheLoop);
4859     return ElementCount::getScalable(0);
4860   }
4861 
4862   LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
4863 
4864   auto MaxScalableVF = ElementCount::getScalable(
4865       std::numeric_limits<ElementCount::ScalarTy>::max());
4866 
4867   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
4868   // FIXME: While for scalable vectors this is currently sufficient, this should
4869   // be replaced by a more detailed mechanism that filters out specific VFs,
4870   // instead of invalidating vectorization for a whole set of VFs based on the
4871   // MaxVF.
4872 
4873   // Disable scalable vectorization if the loop contains unsupported reductions.
4874   if (!canVectorizeReductions(MaxScalableVF)) {
4875     reportVectorizationInfo(
4876         "Scalable vectorization not supported for the reduction "
4877         "operations found in this loop.",
4878         "ScalableVFUnfeasible", ORE, TheLoop);
4879     return ElementCount::getScalable(0);
4880   }
4881 
4882   // Disable scalable vectorization if the loop contains any instructions
4883   // with element types not supported for scalable vectors.
4884   if (any_of(ElementTypesInLoop, [&](Type *Ty) {
4885         return !Ty->isVoidTy() &&
4886                !this->TTI.isElementTypeLegalForScalableVector(Ty);
4887       })) {
4888     reportVectorizationInfo("Scalable vectorization is not supported "
4889                             "for all element types found in this loop.",
4890                             "ScalableVFUnfeasible", ORE, TheLoop);
4891     return ElementCount::getScalable(0);
4892   }
4893 
4894   if (Legal->isSafeForAnyVectorWidth())
4895     return MaxScalableVF;
4896 
4897   // Limit MaxScalableVF by the maximum safe dependence distance.
4898   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
4899   if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange))
4900     MaxVScale =
4901         TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax();
4902   MaxScalableVF = ElementCount::getScalable(
4903       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
4904   if (!MaxScalableVF)
4905     reportVectorizationInfo(
4906         "Max legal vector width too small, scalable vectorization "
4907         "unfeasible.",
4908         "ScalableVFUnfeasible", ORE, TheLoop);
4909 
4910   return MaxScalableVF;
4911 }
4912 
4913 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF(
4914     unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) {
4915   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
4916   unsigned SmallestType, WidestType;
4917   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
4918 
4919   // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (the one involved in the
  // smallest dependence distance).
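  // As a worked example with hypothetical numbers: if LAA reports a maximum
  // safe width of 256 bits and the widest access type is i32, then
  // MaxSafeElements = PowerOf2Floor(256 / 32) = 8 elements.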
4923   unsigned MaxSafeElements =
4924       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
4925 
4926   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
4927   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
4928 
4929   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
4930                     << ".\n");
4931   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
4932                     << ".\n");
4933 
4934   // First analyze the UserVF, fall back if the UserVF should be ignored.
4935   if (UserVF) {
4936     auto MaxSafeUserVF =
4937         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
4938 
4939     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
4940       // If `VF=vscale x N` is safe, then so is `VF=N`
4941       if (UserVF.isScalable())
4942         return FixedScalableVFPair(
4943             ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
4944       else
4945         return UserVF;
4946     }
4947 
4948     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
4949 
4950     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
4951     // is better to ignore the hint and let the compiler choose a suitable VF.
4952     if (!UserVF.isScalable()) {
4953       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
4954                         << " is unsafe, clamping to max safe VF="
4955                         << MaxSafeFixedVF << ".\n");
4956       ORE->emit([&]() {
4957         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
4958                                           TheLoop->getStartLoc(),
4959                                           TheLoop->getHeader())
4960                << "User-specified vectorization factor "
4961                << ore::NV("UserVectorizationFactor", UserVF)
4962                << " is unsafe, clamping to maximum safe vectorization factor "
4963                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
4964       });
4965       return MaxSafeFixedVF;
4966     }
4967 
4968     if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
4969       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
4970                         << " is ignored because scalable vectors are not "
4971                            "available.\n");
4972       ORE->emit([&]() {
4973         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
4974                                           TheLoop->getStartLoc(),
4975                                           TheLoop->getHeader())
4976                << "User-specified vectorization factor "
4977                << ore::NV("UserVectorizationFactor", UserVF)
4978                << " is ignored because the target does not support scalable "
4979                   "vectors. The compiler will pick a more suitable value.";
4980       });
4981     } else {
4982       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
4983                         << " is unsafe. Ignoring scalable UserVF.\n");
4984       ORE->emit([&]() {
4985         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
4986                                           TheLoop->getStartLoc(),
4987                                           TheLoop->getHeader())
4988                << "User-specified vectorization factor "
4989                << ore::NV("UserVectorizationFactor", UserVF)
4990                << " is unsafe. Ignoring the hint to let the compiler pick a "
4991                   "more suitable value.";
4992       });
4993     }
4994   }
4995 
4996   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
4997                     << " / " << WidestType << " bits.\n");
4998 
4999   FixedScalableVFPair Result(ElementCount::getFixed(1),
5000                              ElementCount::getScalable(0));
5001   if (auto MaxVF =
5002           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5003                                   MaxSafeFixedVF, FoldTailByMasking))
5004     Result.FixedVF = MaxVF;
5005 
5006   if (auto MaxVF =
5007           getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType,
5008                                   MaxSafeScalableVF, FoldTailByMasking))
5009     if (MaxVF.isScalable()) {
5010       Result.ScalableVF = MaxVF;
5011       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5012                         << "\n");
5013     }
5014 
5015   return Result;
5016 }
5017 
5018 FixedScalableVFPair
5019 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5020   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this, since the check is still likely to
    // be dynamically uniform if the target can skip it.
5023     reportVectorizationFailure(
5024         "Not inserting runtime ptr check for divergent target",
5025         "runtime pointer checks needed. Not enabled for divergent target",
5026         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5027     return FixedScalableVFPair::getNone();
5028   }
5029 
5030   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5031   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5032   if (TC == 1) {
5033     reportVectorizationFailure("Single iteration (non) loop",
5034         "loop trip count is one, irrelevant for vectorization",
5035         "SingleIterationLoop", ORE, TheLoop);
5036     return FixedScalableVFPair::getNone();
5037   }
5038 
5039   switch (ScalarEpilogueStatus) {
5040   case CM_ScalarEpilogueAllowed:
5041     return computeFeasibleMaxVF(TC, UserVF, false);
5042   case CM_ScalarEpilogueNotAllowedUsePredicate:
5043     LLVM_FALLTHROUGH;
5044   case CM_ScalarEpilogueNotNeededUsePredicate:
5045     LLVM_DEBUG(
5046         dbgs() << "LV: vector predicate hint/switch found.\n"
5047                << "LV: Not allowing scalar epilogue, creating predicated "
5048                << "vector loop.\n");
5049     break;
5050   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5051     // fallthrough as a special case of OptForSize
5052   case CM_ScalarEpilogueNotAllowedOptSize:
5053     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5054       LLVM_DEBUG(
5055           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5056     else
5057       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5058                         << "count.\n");
5059 
    // Bail if runtime checks are required, which are not good when optimizing
    // for size.
5062     if (runtimeChecksRequired())
5063       return FixedScalableVFPair::getNone();
5064 
5065     break;
5066   }
5067 
  // The only loops we can vectorize without a scalar epilogue are loops with
5069   // a bottom-test and a single exiting block. We'd have to handle the fact
5070   // that not every instruction executes on the last iteration.  This will
5071   // require a lane mask which varies through the vector loop body.  (TODO)
5072   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5073     // If there was a tail-folding hint/switch, but we can't fold the tail by
5074     // masking, fallback to a vectorization with a scalar epilogue.
5075     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5076       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5077                            "scalar epilogue instead.\n");
5078       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5079       return computeFeasibleMaxVF(TC, UserVF, false);
5080     }
5081     return FixedScalableVFPair::getNone();
5082   }
5083 
  // Now try tail folding.
5085 
5086   // Invalidate interleave groups that require an epilogue if we can't mask
5087   // the interleave-group.
5088   if (!useMaskedInterleavedAccesses(TTI)) {
5089     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5090            "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
5093     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5094   }
5095 
5096   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
5097   // Avoid tail folding if the trip count is known to be a multiple of any VF
5098   // we chose.
  // FIXME: The condition below pessimizes the case for fixed-width vectors,
5100   // when scalable VFs are also candidates for vectorization.
5101   if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5102     ElementCount MaxFixedVF = MaxFactors.FixedVF;
5103     assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5104            "MaxFixedVF must be a power of 2");
5105     unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5106                                    : MaxFixedVF.getFixedValue();
5107     ScalarEvolution *SE = PSE.getSE();
5108     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5109     const SCEV *ExitCount = SE->getAddExpr(
5110         BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5111     const SCEV *Rem = SE->getURemExpr(
5112         SE->applyLoopGuards(ExitCount, TheLoop),
5113         SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5114     if (Rem->isZero()) {
5115       // Accept MaxFixedVF if we do not have a tail.
5116       LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5117       return MaxFactors;
5118     }
5119   }
5120 
  // For scalable vectors, don't use tail folding for low trip counts or when
  // optimizing for code size, unless the user has explicitly requested it.
5124   if (ScalarEpilogueStatus != CM_ScalarEpilogueNotNeededUsePredicate &&
5125       ScalarEpilogueStatus != CM_ScalarEpilogueNotAllowedUsePredicate &&
5126       MaxFactors.ScalableVF.isVector())
5127     MaxFactors.ScalableVF = ElementCount::getScalable(0);
5128 
5129   // If we don't know the precise trip count, or if the trip count that we
5130   // found modulo the vectorization factor is not zero, try to fold the tail
5131   // by masking.
5132   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5133   if (Legal->prepareToFoldTailByMasking()) {
5134     FoldTailByMasking = true;
5135     return MaxFactors;
5136   }
5137 
5138   // If there was a tail-folding hint/switch, but we can't fold the tail by
5139   // masking, fallback to a vectorization with a scalar epilogue.
5140   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5141     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5142                          "scalar epilogue instead.\n");
5143     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5144     return MaxFactors;
5145   }
5146 
5147   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5148     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5149     return FixedScalableVFPair::getNone();
5150   }
5151 
5152   if (TC == 0) {
5153     reportVectorizationFailure(
5154         "Unable to calculate the loop count due to complex control flow",
5155         "unable to calculate the loop count due to complex control flow",
5156         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5157     return FixedScalableVFPair::getNone();
5158   }
5159 
5160   reportVectorizationFailure(
5161       "Cannot optimize for size and vectorize at the same time.",
5162       "cannot optimize for size and vectorize at the same time. "
5163       "Enable vectorization of this loop with '#pragma clang loop "
5164       "vectorize(enable)' when compiling with -Os/-Oz",
5165       "NoTailLoopWithOptForSize", ORE, TheLoop);
5166   return FixedScalableVFPair::getNone();
5167 }
5168 
5169 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5170     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5171     const ElementCount &MaxSafeVF, bool FoldTailByMasking) {
5172   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5173   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5174       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5175                            : TargetTransformInfo::RGK_FixedWidthVector);
5176 
5177   // Convenience function to return the minimum of two ElementCounts.
5178   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5179     assert((LHS.isScalable() == RHS.isScalable()) &&
5180            "Scalable flags must match");
5181     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5182   };
5183 
  // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
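  // E.g. (hypothetical numbers): a 128-bit widest register and a widest type
  // of i32 yield PowerOf2Floor(128 / 32) = 4 lanes, i.e. <4 x i32>, or
  // <vscale x 4 x i32> when computing the scalable maximum.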
5186   auto MaxVectorElementCount = ElementCount::get(
5187       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5188       ComputeScalableMaxVF);
5189   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5190   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5191                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5192 
5193   if (!MaxVectorElementCount) {
5194     LLVM_DEBUG(dbgs() << "LV: The target has no "
5195                       << (ComputeScalableMaxVF ? "scalable" : "fixed")
5196                       << " vector registers.\n");
5197     return ElementCount::getFixed(1);
5198   }
5199 
5200   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5201   if (ConstTripCount &&
5202       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5203       (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) {
5204     // If loop trip count (TC) is known at compile time there is no point in
5205     // choosing VF greater than TC (as done in the loop below). Select maximum
5206     // power of two which doesn't exceed TC.
5207     // If MaxVectorElementCount is scalable, we only fall back on a fixed VF
5208     // when the TC is less than or equal to the known number of lanes.
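    // E.g. (hypothetical numbers): with ConstTripCount == 10 and up to 16
    // lanes available, the VF is clamped to PowerOf2Floor(10) = 8, so no
    // lanes are wasted past the trip count.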
5209     auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount);
5210     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
5211                          "exceeding the constant trip count: "
5212                       << ClampedConstTripCount << "\n");
5213     return ElementCount::getFixed(ClampedConstTripCount);
5214   }
5215 
5216   ElementCount MaxVF = MaxVectorElementCount;
5217   if (MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 &&
5218                             TTI.shouldMaximizeVectorBandwidth())) {
5219     auto MaxVectorElementCountMaxBW = ElementCount::get(
5220         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5221         ComputeScalableMaxVF);
5222     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5223 
5224     // Collect all viable vectorization factors larger than the default MaxVF
5225     // (i.e. MaxVectorElementCount).
5226     SmallVector<ElementCount, 8> VFs;
5227     for (ElementCount VS = MaxVectorElementCount * 2;
5228          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5229       VFs.push_back(VS);
5230 
5231     // For each VF calculate its register usage.
5232     auto RUs = calculateRegisterUsage(VFs);
5233 
5234     // Select the largest VF which doesn't require more registers than existing
5235     // ones.
5236     for (int i = RUs.size() - 1; i >= 0; --i) {
5237       bool Selected = true;
5238       for (auto &pair : RUs[i].MaxLocalUsers) {
5239         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5240         if (pair.second > TargetNumRegisters)
5241           Selected = false;
5242       }
5243       if (Selected) {
5244         MaxVF = VFs[i];
5245         break;
5246       }
5247     }
5248     if (ElementCount MinVF =
5249             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
5250       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5251         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5252                           << ") with target's minimum: " << MinVF << '\n');
5253         MaxVF = MinVF;
5254       }
5255     }
5256 
5257     // Invalidate any widening decisions we might have made, in case the loop
5258     // requires prediction (decided later), but we have already made some
5259     // load/store widening decisions.
5260     invalidateCostModelingDecisions();
5261   }
5262   return MaxVF;
5263 }
5264 
5265 Optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const {
5266   if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
5267     auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange);
5268     auto Min = Attr.getVScaleRangeMin();
5269     auto Max = Attr.getVScaleRangeMax();
5270     if (Max && Min == Max)
5271       return Max;
5272   }
5273 
5274   return TTI.getVScaleForTuning();
5275 }
5276 
5277 bool LoopVectorizationCostModel::isMoreProfitable(
5278     const VectorizationFactor &A, const VectorizationFactor &B) const {
5279   InstructionCost CostA = A.Cost;
5280   InstructionCost CostB = B.Cost;
5281 
5282   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
5283 
5284   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
5285       MaxTripCount) {
    // If we are folding the tail and the trip count is a known (possibly
    // small) constant, the trip count will be rounded up to an integer number
    // of vector iterations. The total cost will be
    // PerIterationCost * ceil(TripCount / VF), which we compare directly.
    // When not folding the tail, the total cost will be
    // PerIterationCost * floor(TC / VF) plus the scalar remainder cost, and
    // so is approximated with the per-lane cost below instead of using the
    // trip count as here.
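    // Worked example (hypothetical costs): with MaxTripCount == 20,
    // A = {VF=8, Cost=4} and B = {VF=16, Cost=7}:
    //   RTCostA = 4 * ceil(20 / 8)  = 4 * 3 = 12
    //   RTCostB = 7 * ceil(20 / 16) = 7 * 2 = 14
    // so A wins here even though B has the lower per-lane cost.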
5293     auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
5294     auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
5295     return RTCostA < RTCostB;
5296   }
5297 
5298   // Improve estimate for the vector width if it is scalable.
5299   unsigned EstimatedWidthA = A.Width.getKnownMinValue();
5300   unsigned EstimatedWidthB = B.Width.getKnownMinValue();
5301   if (Optional<unsigned> VScale = getVScaleForTuning()) {
5302     if (A.Width.isScalable())
5303       EstimatedWidthA *= VScale.getValue();
5304     if (B.Width.isScalable())
5305       EstimatedWidthB *= VScale.getValue();
5306   }
5307 
5308   // Assume vscale may be larger than 1 (or the value being tuned for),
5309   // so that scalable vectorization is slightly favorable over fixed-width
5310   // vectorization.
5311   if (A.Width.isScalable() && !B.Width.isScalable())
5312     return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);
5313 
5314   // To avoid the need for FP division:
5315   //      (CostA / A.Width) < (CostB / B.Width)
5316   // <=>  (CostA * B.Width) < (CostB * A.Width)
5317   return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
5318 }
5319 
5320 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
5321     const ElementCountSet &VFCandidates) {
5322   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5323   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5324   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5325   assert(VFCandidates.count(ElementCount::getFixed(1)) &&
5326          "Expected Scalar VF to be a candidate");
5327 
5328   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
5329   VectorizationFactor ChosenFactor = ScalarCost;
5330 
5331   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5332   if (ForceVectorization && VFCandidates.size() > 1) {
5333     // Ignore scalar width, because the user explicitly wants vectorization.
5334     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5335     // evaluation.
5336     ChosenFactor.Cost = InstructionCost::getMax();
5337   }
5338 
5339   SmallVector<InstructionVFPair> InvalidCosts;
5340   for (const auto &i : VFCandidates) {
5341     // The cost for scalar VF=1 is already calculated, so ignore it.
5342     if (i.isScalar())
5343       continue;
5344 
5345     VectorizationCostTy C = expectedCost(i, &InvalidCosts);
5346     VectorizationFactor Candidate(i, C.first);
5347 
5348 #ifndef NDEBUG
5349     unsigned AssumedMinimumVscale = 1;
5350     if (Optional<unsigned> VScale = getVScaleForTuning())
5351       AssumedMinimumVscale = VScale.getValue();
5352     unsigned Width =
5353         Candidate.Width.isScalable()
5354             ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
5355             : Candidate.Width.getFixedValue();
5356     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5357                       << " costs: " << (Candidate.Cost / Width));
5358     if (i.isScalable())
5359       LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
5360                         << AssumedMinimumVscale << ")");
5361     LLVM_DEBUG(dbgs() << ".\n");
5362 #endif
5363 
5364     if (!C.second && !ForceVectorization) {
5365       LLVM_DEBUG(
5366           dbgs() << "LV: Not considering vector loop of width " << i
5367                  << " because it will not generate any vector instructions.\n");
5368       continue;
5369     }
5370 
    // If profitable, add it to the ProfitableVFs list.
5372     if (isMoreProfitable(Candidate, ScalarCost))
5373       ProfitableVFs.push_back(Candidate);
5374 
5375     if (isMoreProfitable(Candidate, ChosenFactor))
5376       ChosenFactor = Candidate;
5377   }
5378 
5379   // Emit a report of VFs with invalid costs in the loop.
5380   if (!InvalidCosts.empty()) {
5381     // Group the remarks per instruction, keeping the instruction order from
5382     // InvalidCosts.
5383     std::map<Instruction *, unsigned> Numbering;
5384     unsigned I = 0;
5385     for (auto &Pair : InvalidCosts)
5386       if (!Numbering.count(Pair.first))
5387         Numbering[Pair.first] = I++;
5388 
5389     // Sort the list, first on instruction(number) then on VF.
5390     llvm::sort(InvalidCosts,
5391                [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
5392                  if (Numbering[A.first] != Numbering[B.first])
5393                    return Numbering[A.first] < Numbering[B.first];
5394                  ElementCountComparator ECC;
5395                  return ECC(A.second, B.second);
5396                });
5397 
5398     // For a list of ordered instruction-vf pairs:
5399     //   [(load, vf1), (load, vf2), (store, vf1)]
5400     // Group the instructions together to emit separate remarks for:
5401     //   load  (vf1, vf2)
5402     //   store (vf1)
5403     auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
5404     auto Subset = ArrayRef<InstructionVFPair>();
5405     do {
5406       if (Subset.empty())
5407         Subset = Tail.take_front(1);
5408 
5409       Instruction *I = Subset.front().first;
5410 
5411       // If the next instruction is different, or if there are no other pairs,
5412       // emit a remark for the collated subset. e.g.
      //   [(load, vf1), (load, vf2)]
      // to emit:
      //  remark: invalid costs for 'load' at VF=(vf1, vf2)
5416       if (Subset == Tail || Tail[Subset.size()].first != I) {
5417         std::string OutString;
5418         raw_string_ostream OS(OutString);
5419         assert(!Subset.empty() && "Unexpected empty range");
5420         OS << "Instruction with invalid costs prevented vectorization at VF=(";
5421         for (auto &Pair : Subset)
5422           OS << (Pair.second == Subset.front().second ? "" : ", ")
5423              << Pair.second;
5424         OS << "):";
5425         if (auto *CI = dyn_cast<CallInst>(I))
5426           OS << " call to " << CI->getCalledFunction()->getName();
5427         else
5428           OS << " " << I->getOpcodeName();
5429         OS.flush();
5430         reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
5431         Tail = Tail.drop_front(Subset.size());
5432         Subset = {};
5433       } else
5434         // Grow the subset by one element
5435         Subset = Tail.take_front(Subset.size() + 1);
5436     } while (!Tail.empty());
5437   }
5438 
5439   if (!EnableCondStoresVectorization && NumPredStores) {
5440     reportVectorizationFailure("There are conditional stores.",
5441         "store that is conditionally executed prevents vectorization",
5442         "ConditionalStore", ORE, TheLoop);
5443     ChosenFactor = ScalarCost;
5444   }
5445 
5446   LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
5447                  ChosenFactor.Cost >= ScalarCost.Cost) dbgs()
5448              << "LV: Vectorization seems to be not beneficial, "
5449              << "but was forced by a user.\n");
5450   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
5451   return ChosenFactor;
5452 }
5453 
5454 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5455     const Loop &L, ElementCount VF) const {
  // Cross-iteration phis such as first-order recurrences need special
  // handling and are currently unsupported.
5458   if (any_of(L.getHeader()->phis(),
5459              [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); }))
5460     return false;
5461 
5462   // Phis with uses outside of the loop require special handling and are
5463   // currently unsupported.
5464   for (auto &Entry : Legal->getInductionVars()) {
5465     // Look for uses of the value of the induction at the last iteration.
5466     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5467     for (User *U : PostInc->users())
5468       if (!L.contains(cast<Instruction>(U)))
5469         return false;
    // Look for uses of the penultimate value of the induction.
5471     for (User *U : Entry.first->users())
5472       if (!L.contains(cast<Instruction>(U)))
5473         return false;
5474   }
5475 
5476   // Induction variables that are widened require special handling that is
5477   // currently not supported.
5478   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5479         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5480                  this->isProfitableToScalarize(Entry.first, VF));
5481       }))
5482     return false;
5483 
  // Epilogue vectorization code has not been audited to ensure it handles
  // non-latch exits properly.  It may be fine, but it needs to be audited
  // and tested.
5487   if (L.getExitingBlock() != L.getLoopLatch())
5488     return false;
5489 
5490   return true;
5491 }
5492 
5493 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5494     const ElementCount VF) const {
5495   // FIXME: We need a much better cost-model to take different parameters such
5496   // as register pressure, code size increase and cost of extra branches into
5497   // account. For now we apply a very crude heuristic and only consider loops
5498   // with vectorization factors larger than a certain value.
5499   // We also consider epilogue vectorization unprofitable for targets that don't
  // consider interleaving beneficial (e.g. MVE).
5501   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5502     return false;
5503   // FIXME: We should consider changing the threshold for scalable
5504   // vectors to take VScaleForTuning into account.
5505   if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF)
5506     return true;
5507   return false;
5508 }
5509 
5510 VectorizationFactor
5511 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5512     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5513   VectorizationFactor Result = VectorizationFactor::Disabled();
5514   if (!EnableEpilogueVectorization) {
5515     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5516     return Result;
5517   }
5518 
5519   if (!isScalarEpilogueAllowed()) {
5520     LLVM_DEBUG(
5521         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5522                   "allowed.\n";);
5523     return Result;
5524   }
5525 
5526   // Not really a cost consideration, but check for unsupported cases here to
5527   // simplify the logic.
5528   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5529     LLVM_DEBUG(
5530         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5531                   "not a supported candidate.\n";);
5532     return Result;
5533   }
5534 
5535   if (EpilogueVectorizationForceVF > 1) {
5536     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
    ElementCount ForcedEC =
        ElementCount::getFixed(EpilogueVectorizationForceVF);
5538     if (LVP.hasPlanWithVF(ForcedEC))
5539       return {ForcedEC, 0};
5540     else {
5541       LLVM_DEBUG(
5542           dbgs()
5543               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5544       return Result;
5545     }
5546   }
5547 
5548   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
5549       TheLoop->getHeader()->getParent()->hasMinSize()) {
5550     LLVM_DEBUG(
5551         dbgs()
5552             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
5553     return Result;
5554   }
5555 
5556   if (!isEpilogueVectorizationProfitable(MainLoopVF)) {
5557     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
5558                          "this loop\n");
5559     return Result;
5560   }
5561 
5562   // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know
5563   // the main loop handles 8 lanes per iteration. We could still benefit from
5564   // vectorizing the epilogue loop with VF=4.
5565   ElementCount EstimatedRuntimeVF = MainLoopVF;
5566   if (MainLoopVF.isScalable()) {
5567     EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
5568     if (Optional<unsigned> VScale = getVScaleForTuning())
5569       EstimatedRuntimeVF *= VScale.getValue();
5570   }
5571 
5572   for (auto &NextVF : ProfitableVFs)
5573     if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() &&
5574           ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) ||
5575          ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) &&
5576         (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) &&
5577         LVP.hasPlanWithVF(NextVF.Width))
5578       Result = NextVF;
5579 
5580   if (Result != VectorizationFactor::Disabled())
5581     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
5582                       << Result.Width << "\n";);
5583   return Result;
5584 }
5585 
5586 std::pair<unsigned, unsigned>
5587 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5588   unsigned MinWidth = -1U;
5589   unsigned MaxWidth = 8;
5590   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5591   // For in-loop reductions, no element types are added to ElementTypesInLoop
5592   // if there are no loads/stores in the loop. In this case, check through the
5593   // reduction variables to determine the maximum width.
5594   if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) {
5595     // Reset MaxWidth so that we can find the smallest type used by recurrences
5596     // in the loop.
5597     MaxWidth = -1U;
5598     for (auto &PhiDescriptorPair : Legal->getReductionVars()) {
5599       const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second;
5600       // When finding the min width used by the recurrence we need to account
5601       // for casts on the input operands of the recurrence.
5602       MaxWidth = std::min<unsigned>(
5603           MaxWidth, std::min<unsigned>(
5604                         RdxDesc.getMinWidthCastToRecurrenceTypeInBits(),
5605                         RdxDesc.getRecurrenceType()->getScalarSizeInBits()));
5606     }
5607   } else {
5608     for (Type *T : ElementTypesInLoop) {
5609       MinWidth = std::min<unsigned>(
5610           MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5611       MaxWidth = std::max<unsigned>(
5612           MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
5613     }
5614   }
5615   return {MinWidth, MaxWidth};
5616 }
5617 
5618 void LoopVectorizationCostModel::collectElementTypesForWidening() {
5619   ElementTypesInLoop.clear();
5620   // For each block.
5621   for (BasicBlock *BB : TheLoop->blocks()) {
5622     // For each instruction in the loop.
5623     for (Instruction &I : BB->instructionsWithoutDebug()) {
5624       Type *T = I.getType();
5625 
5626       // Skip ignored values.
5627       if (ValuesToIgnore.count(&I))
5628         continue;
5629 
5630       // Only examine Loads, Stores and PHINodes.
5631       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5632         continue;
5633 
5634       // Examine PHI nodes that are reduction variables. Update the type to
5635       // account for the recurrence type.
5636       if (auto *PN = dyn_cast<PHINode>(&I)) {
5637         if (!Legal->isReductionVariable(PN))
5638           continue;
5639         const RecurrenceDescriptor &RdxDesc =
5640             Legal->getReductionVars().find(PN)->second;
5641         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
5642             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
5643                                       RdxDesc.getRecurrenceType(),
5644                                       TargetTransformInfo::ReductionFlags()))
5645           continue;
5646         T = RdxDesc.getRecurrenceType();
5647       }
5648 
5649       // Examine the stored values.
5650       if (auto *ST = dyn_cast<StoreInst>(&I))
5651         T = ST->getValueOperand()->getType();
5652 
5653       assert(T->isSized() &&
5654              "Expected the load/store/recurrence type to be sized");
5655 
5656       ElementTypesInLoop.insert(T);
5657     }
5658   }
5659 }
5660 
5661 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
5662                                                            unsigned LoopCost) {
5663   // -- The interleave heuristics --
5664   // We interleave the loop in order to expose ILP and reduce the loop overhead.
5665   // There are many micro-architectural considerations that we can't predict
5666   // at this level. For example, frontend pressure (on decode or fetch) due to
5667   // code size, or the number and capabilities of the execution ports.
5668   //
5669   // We use the following heuristics to select the interleave count:
5670   // 1. If the code has reductions, then we interleave to break the cross
5671   // iteration dependency.
5672   // 2. If the loop is really small, then we interleave to reduce the loop
5673   // overhead.
5674   // 3. We don't interleave if we think that we will spill registers to memory
5675   // due to the increased register pressure.
5676 
5677   if (!isScalarEpilogueAllowed())
5678     return 1;
5679 
  // Do not interleave if there is a finite max safe dependence distance: it
  // already limits the VF, and interleaving would widen the span of memory
  // accessed per vector iteration beyond that distance.
5681   if (Legal->getMaxSafeDepDistBytes() != -1U)
5682     return 1;
5683 
5684   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
5685   const bool HasReductions = !Legal->getReductionVars().empty();
5686   // Do not interleave loops with a relatively small known or estimated trip
5687   // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled and the code has scalar reductions (HasReductions && VF == 1),
5689   // because with the above conditions interleaving can expose ILP and break
5690   // cross iteration dependences for reductions.
5691   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
5692       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
5693     return 1;
5694 
5695   // If we did not calculate the cost for VF (because the user selected the VF)
5696   // then we calculate the cost of VF here.
5697   if (LoopCost == 0) {
5698     InstructionCost C = expectedCost(VF).first;
5699     assert(C.isValid() && "Expected to have chosen a VF with valid cost");
5700     LoopCost = *C.getValue();
5701 
5702     // Loop body is free and there is no need for interleaving.
5703     if (LoopCost == 0)
5704       return 1;
5705   }
5706 
5707   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these values below, so make sure every register class is
  // counted as using at least one register to avoid division by zero.
5710   for (auto& pair : R.MaxLocalUsers) {
5711     pair.second = std::max(pair.second, 1U);
5712   }
5713 
5714   // We calculate the interleave count using the following formula.
5715   // Subtract the number of loop invariants from the number of available
5716   // registers. These registers are used by all of the interleaved instances.
5717   // Next, divide the remaining registers by the number of registers that is
5718   // required by the loop, in order to estimate how many parallel instances
5719   // fit without causing spills. All of this is rounded down if necessary to be
5720   // a power of two. We want power of two interleave count to simplify any
5721   // addressing operations or alignment considerations.
5722   // We also want power of two interleave counts to ensure that the induction
5723   // variable of the vector loop wraps to zero, when tail is folded by masking;
5724   // this currently happens when OptForSize, in which case IC is set to 1 above.
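  // As a worked example with hypothetical numbers: with 32 registers in a
  // class, 2 of them held by loop invariants and 10 live values per
  // interleaved instance, IC = PowerOf2Floor((32 - 2) / 10) = 2.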
5725   unsigned IC = UINT_MAX;
5726 
5727   for (auto& pair : R.MaxLocalUsers) {
5728     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5729     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
5730                       << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
5732     if (VF.isScalar()) {
5733       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5734         TargetNumRegisters = ForceTargetNumScalarRegs;
5735     } else {
5736       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5737         TargetNumRegisters = ForceTargetNumVectorRegs;
5738     }
5739     unsigned MaxLocalUsers = pair.second;
5740     unsigned LoopInvariantRegs = 0;
5741     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
5742       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
5743 
    unsigned TmpIC =
        PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
5745     // Don't count the induction variable as interleaved.
5746     if (EnableIndVarRegisterHeur) {
5747       TmpIC =
5748           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
5749                         std::max(1U, (MaxLocalUsers - 1)));
5750     }
5751 
5752     IC = std::min(IC, TmpIC);
5753   }
5754 
5755   // Clamp the interleave ranges to reasonable counts.
5756   unsigned MaxInterleaveCount =
5757       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
5758 
5759   // Check if the user has overridden the max.
5760   if (VF.isScalar()) {
5761     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5762       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5763   } else {
5764     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5765       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5766   }
5767 
  // If the trip count is a known or estimated compile-time constant, limit
  // the interleave count to at most the trip count divided by VF, while
  // keeping it at least 1.
5771   //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second
  // vector iteration is enabled. However, for larger loops, there is likely
  // to be a similar benefit as for fixed-width vectors. For now, we choose to
  // leave
5776   // the InterleaveCount as if vscale is '1', although if some information about
5777   // the vector is known (e.g. min vector size), we can make a better decision.
5778   if (BestKnownTC) {
5779     MaxInterleaveCount =
5780         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
5781     // Make sure MaxInterleaveCount is greater than 0.
5782     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
5783   }
5784 
5785   assert(MaxInterleaveCount > 0 &&
5786          "Maximum interleave count must be greater than 0");
5787 
  // Clamp the calculated IC to be between 1 and the max interleave count that
  // the target and trip count allow.
5790   if (IC > MaxInterleaveCount)
5791     IC = MaxInterleaveCount;
5792   else
5793     // Make sure IC is greater than 0.
5794     IC = std::max(1u, IC);
5795 
5796   assert(IC > 0 && "Interleave count must be greater than 0.");
5797 
5798   // Interleave if we vectorized this loop and there is a reduction that could
5799   // benefit from interleaving.
5800   if (VF.isVector() && HasReductions) {
5801     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5802     return IC;
5803   }
5804 
5805   // For any scalar loop that either requires runtime checks or predication we
5806   // are better off leaving this to the unroller. Note that if we've already
5807   // vectorized the loop we will have done the runtime check and so interleaving
5808   // won't require further checks.
5809   bool ScalarInterleavingRequiresPredication =
5810       (VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) {
5811          return Legal->blockNeedsPredication(BB);
5812        }));
5813   bool ScalarInterleavingRequiresRuntimePointerCheck =
5814       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
5815 
5816   // We want to interleave small loops in order to reduce the loop overhead and
5817   // potentially expose ILP opportunities.
5818   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
5819                     << "LV: IC is " << IC << '\n'
5820                     << "LV: VF is " << VF << '\n');
5821   const bool AggressivelyInterleaveReductions =
5822       TTI.enableAggressiveInterleaving(HasReductions);
5823   if (!ScalarInterleavingRequiresRuntimePointerCheck &&
5824       !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) {
5825     // We assume that the cost overhead is 1 and we use the cost model
5826     // to estimate the cost of the loop and interleave until the cost of the
5827     // loop overhead is about 5% of the cost of the loop.
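    // E.g. (hypothetical numbers): with SmallLoopCost == 20 and a loop body
    // costing 5, we interleave up to PowerOf2Floor(20 / 5) = 4 times, still
    // capped by the register-pressure-based IC computed above.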
5828     unsigned SmallIC =
5829         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
5830 
5831     // Interleave until store/load ports (estimated by max interleave count) are
5832     // saturated.
5833     unsigned NumStores = Legal->getNumStores();
5834     unsigned NumLoads = Legal->getNumLoads();
5835     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5836     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5837 
5838     // There is little point in interleaving for reductions containing selects
5839     // and compares when VF=1 since it may just create more overhead than it's
5840     // worth for loops with small trip counts. This is because we still have to
5841     // do the final reduction after the loop.
5842     bool HasSelectCmpReductions =
5843         HasReductions &&
5844         any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
5845           const RecurrenceDescriptor &RdxDesc = Reduction.second;
5846           return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
5847               RdxDesc.getRecurrenceKind());
5848         });
5849     if (HasSelectCmpReductions) {
5850       LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
5851       return 1;
5852     }
5853 
5854     // If we have a scalar reduction (vector reductions are already dealt with
5855     // by this point), we can increase the critical path length if the loop
5856     // we're interleaving is inside another loop. For tree-wise reductions
5857     // set the limit to 2, and for ordered reductions it's best to disable
5858     // interleaving entirely.
5859     if (HasReductions && TheLoop->getLoopDepth() > 1) {
5860       bool HasOrderedReductions =
5861           any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
5862             const RecurrenceDescriptor &RdxDesc = Reduction.second;
5863             return RdxDesc.isOrdered();
5864           });
5865       if (HasOrderedReductions) {
5866         LLVM_DEBUG(
5867             dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
5868         return 1;
5869       }
5870 
5871       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5872       SmallIC = std::min(SmallIC, F);
5873       StoresIC = std::min(StoresIC, F);
5874       LoadsIC = std::min(LoadsIC, F);
5875     }
5876 
5877     if (EnableLoadStoreRuntimeInterleave &&
5878         std::max(StoresIC, LoadsIC) > SmallIC) {
5879       LLVM_DEBUG(
5880           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5881       return std::max(StoresIC, LoadsIC);
5882     }
5883 
5884     // If there are scalar reductions and TTI has enabled aggressive
5885     // interleaving for reductions, we will interleave to expose ILP.
5886     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
5887         AggressivelyInterleaveReductions) {
5888       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5889       // Interleave no less than SmallIC but not as aggressive as the normal IC
5890       // to satisfy the rare situation when resources are too limited.
5891       return std::max(IC / 2, SmallIC);
5892     } else {
5893       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5894       return SmallIC;
5895     }
5896   }
5897 
5898   // Interleave if this is a large loop (small loops are already dealt with by
5899   // this point) that could benefit from interleaving.
5900   if (AggressivelyInterleaveReductions) {
5901     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5902     return IC;
5903   }
5904 
5905   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
5906   return 1;
5907 }
5908 
5909 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5910 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
5911   // This function calculates the register usage by measuring the highest number
5912   // of values that are alive at a single location. Obviously, this is a very
  // rough estimation. We scan the loop in topological order and
5914   // assign a number to each instruction. We use RPO to ensure that defs are
5915   // met before their users. We assume that each instruction that has in-loop
5916   // users starts an interval. We record every time that an in-loop value is
5917   // used, so we have a list of the first and last occurrences of each
5918   // instruction. Next, we transpose this data structure into a multi map that
5919   // holds the list of intervals that *end* at a specific location. This multi
5920   // map allows us to perform a linear search. We scan the instructions linearly
5921   // and record each time that a new interval starts, by placing it in a set.
5922   // If we find this value in the multi-map then we remove it from the set.
5923   // The max register usage is the maximum size of the set.
5924   // We also search for instructions that are defined outside the loop, but are
5925   // used inside the loop. We need this number separately from the max-interval
  // usage number because when we unroll, loop-invariant values do not take
  // more registers.
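  // As an illustrative sketch, for the numbered sequence
  //   1: %a = load ...
  //   2: %b = add %a, 1
  //   3: %c = mul %b, %b
  // %a's interval ends at its last use in %b and %b's at its use in %c; the
  // two live ranges overlap around instruction 2, where the usage estimate
  // for their register class peaks.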
5928   LoopBlocksDFS DFS(TheLoop);
5929   DFS.perform(LI);
5930 
5931   RegisterUsage RU;
5932 
5933   // Each 'key' in the map opens a new interval. The values
5934   // of the map are the index of the 'last seen' usage of the
5935   // instruction that is the key.
5936   using IntervalMap = DenseMap<Instruction *, unsigned>;
5937 
5938   // Maps instruction to its index.
5939   SmallVector<Instruction *, 64> IdxToInstr;
5940   // Marks the end of each interval.
5941   IntervalMap EndPoint;
5942   // Saves the list of instruction indices that are used in the loop.
5943   SmallPtrSet<Instruction *, 8> Ends;
5944   // Saves the list of values that are used in the loop but are
5945   // defined outside the loop, such as arguments and constants.
5946   SmallPtrSet<Value *, 8> LoopInvariants;
5947 
5948   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
5949     for (Instruction &I : BB->instructionsWithoutDebug()) {
5950       IdxToInstr.push_back(&I);
5951 
5952       // Save the end location of each USE.
5953       for (Value *U : I.operands()) {
5954         auto *Instr = dyn_cast<Instruction>(U);
5955 
5956         // Ignore non-instruction values such as arguments, constants, etc.
5957         if (!Instr)
5958           continue;
5959 
5960         // If this instruction is outside the loop then record it and continue.
5961         if (!TheLoop->contains(Instr)) {
5962           LoopInvariants.insert(Instr);
5963           continue;
5964         }
5965 
5966         // Overwrite previous end points.
5967         EndPoint[Instr] = IdxToInstr.size();
5968         Ends.insert(Instr);
5969       }
5970     }
5971   }
5972 
5973   // Saves the list of intervals that end with the index in 'key'.
5974   using InstrList = SmallVector<Instruction *, 2>;
5975   DenseMap<unsigned, InstrList> TransposeEnds;
5976 
5977   // Transpose the EndPoints to a list of values that end at each index.
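  // For example, if both %a and %b were last used at index 3, then
  // TransposeEnds[3] == {%a, %b}.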
5978   for (auto &Interval : EndPoint)
5979     TransposeEnds[Interval.second].push_back(Interval.first);
5980 
5981   SmallPtrSet<Instruction *, 8> OpenIntervals;
5982   SmallVector<RegisterUsage, 8> RUs(VFs.size());
5983   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
5984 
5985   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
5986 
5987   // A lambda that gets the register usage for the given type and VF.
5988   const auto &TTICapture = TTI;
5989   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
5990     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
5991       return 0;
5992     InstructionCost::CostType RegUsage =
5993         *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
5994     assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() &&
5995            "Nonsensical values for register usage.");
5996     return RegUsage;
5997   };
5998 
  for (unsigned i = 0, s = IdxToInstr.size(); i < s; ++i) {
6000     Instruction *I = IdxToInstr[i];
6001 
6002     // Remove all of the instructions that end at this location.
6003     InstrList &List = TransposeEnds[i];
6004     for (Instruction *ToRemove : List)
6005       OpenIntervals.erase(ToRemove);
6006 
6007     // Ignore instructions that are never used within the loop.
6008     if (!Ends.count(I))
6009       continue;
6010 
6011     // Skip ignored values.
6012     if (ValuesToIgnore.count(I))
6013       continue;
6014 
6015     // For each VF find the maximum usage of registers.
6016     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6017       // Count the number of live intervals.
6018       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6019 
6020       if (VFs[j].isScalar()) {
        for (auto *Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          // MapVector entries are value-initialized to zero, so plain
          // accumulation also handles an interval's first use.
          RegUsage[ClassID] += 1;
        }
6028       } else {
6029         collectUniformsAndScalars(VFs[j]);
        for (auto *Inst : OpenIntervals) {
          // Skip ignored values for VF > 1.
          if (VecValuesToIgnore.count(Inst))
            continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
        }
6048       }
6049 
      for (auto &Pair : RegUsage)
        MaxUsages[j][Pair.first] =
            std::max(MaxUsages[j][Pair.first], Pair.second);
6056     }
6057 
6058     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6059                       << OpenIntervals.size() << '\n');
6060 
6061     // Add the current instruction to the list of open intervals.
6062     OpenIntervals.insert(I);
6063   }
6064 
6065   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6066     SmallMapVector<unsigned, unsigned, 4> Invariant;
6067 
6068     for (auto Inst : LoopInvariants) {
6069       unsigned Usage =
6070           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6071       unsigned ClassID =
6072           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      Invariant[ClassID] += Usage;
6077     }
6078 
6079     LLVM_DEBUG({
6080       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6081       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
             << " items\n";
6083       for (const auto &pair : MaxUsages[i]) {
6084         dbgs() << "LV(REG): RegisterClass: "
6085                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6086                << " registers\n";
6087       }
6088       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
             << " items\n";
6090       for (const auto &pair : Invariant) {
6091         dbgs() << "LV(REG): RegisterClass: "
6092                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6093                << " registers\n";
6094       }
6095     });
6096 
6097     RU.LoopInvariantRegs = Invariant;
6098     RU.MaxLocalUsers = MaxUsages[i];
6099     RUs[i] = RU;
6100   }
6101 
6102   return RUs;
6103 }
6104 
6105 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
6106                                                            ElementCount VF) {
6107   // TODO: Cost model for emulated masked load/store is completely
6108   // broken. This hack guides the cost model to use an artificially
6109   // high enough value to practically disable vectorization with such
6110   // operations, except where previously deployed legality hack allowed
6111   // using very low cost values. This is to avoid regressions coming simply
6112   // from moving "masked load/store" check from legality to cost model.
  // Emulation of masked loads/gathers was previously never allowed; a limited
  // amount of masked store/scatter emulation was.
6115   assert(isPredicatedInst(I, VF) && "Expecting a scalar emulated instruction");
6116   return isa<LoadInst>(I) ||
6117          (isa<StoreInst>(I) &&
6118           NumPredStores > NumberOfStoresToPredicate);
6119 }
6120 
6121 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6122   // If we aren't vectorizing the loop, or if we've already collected the
6123   // instructions to scalarize, there's nothing to do. Collection may already
6124   // have occurred if we have a user-selected VF and are now computing the
6125   // expected cost for interleaving.
6126   if (VF.isScalar() || VF.isZero() ||
6127       InstsToScalarize.find(VF) != InstsToScalarize.end())
6128     return;
6129 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6131   // not profitable to scalarize any instructions, the presence of VF in the
6132   // map will indicate that we've analyzed it already.
6133   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6134 
6135   // Find all the instructions that are scalar with predication in the loop and
6136   // determine if it would be better to not if-convert the blocks they are in.
6137   // If so, we also record the instructions to scalarize.
6138   for (BasicBlock *BB : TheLoop->blocks()) {
6139     if (!blockNeedsPredicationForAnyReason(BB))
6140       continue;
6141     for (Instruction &I : *BB)
6142       if (isScalarWithPredication(&I, VF)) {
6143         ScalarCostsTy ScalarCosts;
6144         // Do not apply discount if scalable, because that would lead to
6145         // invalid scalarization costs.
6146         // Do not apply discount logic if hacked cost is needed
6147         // for emulated masked memrefs.
6148         if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) &&
6149             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6150           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6151         // Remember that BB will remain after vectorization.
6152         PredicatedBBsAfterVectorization.insert(BB);
6153       }
6154   }
6155 }
6156 
6157 int LoopVectorizationCostModel::computePredInstDiscount(
6158     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6159   assert(!isUniformAfterVectorization(PredInst, VF) &&
6160          "Instruction marked uniform-after-vectorization will be predicated");
6161 
6162   // Initialize the discount to zero, meaning that the scalar version and the
6163   // vector version cost the same.
6164   InstructionCost Discount = 0;
6165 
6166   // Holds instructions to analyze. The instructions we visit are mapped in
6167   // ScalarCosts. Those instructions are the ones that would be scalarized if
6168   // we find that the scalar version costs less.
6169   SmallVector<Instruction *, 8> Worklist;
6170 
6171   // Returns true if the given instruction can be scalarized.
6172   auto canBeScalarized = [&](Instruction *I) -> bool {
6173     // We only attempt to scalarize instructions forming a single-use chain
6174     // from the original predicated block that would otherwise be vectorized.
6175     // Although not strictly necessary, we give up on instructions we know will
6176     // already be scalar to avoid traversing chains that are unlikely to be
6177     // beneficial.
6178     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6179         isScalarAfterVectorization(I, VF))
6180       return false;
6181 
6182     // If the instruction is scalar with predication, it will be analyzed
6183     // separately. We ignore it within the context of PredInst.
6184     if (isScalarWithPredication(I, VF))
6185       return false;
6186 
6187     // If any of the instruction's operands are uniform after vectorization,
6188     // the instruction cannot be scalarized. This prevents, for example, a
6189     // masked load from being scalarized.
6190     //
6191     // We assume we will only emit a value for lane zero of an instruction
6192     // marked uniform after vectorization, rather than VF identical values.
6193     // Thus, if we scalarize an instruction that uses a uniform, we would
6194     // create uses of values corresponding to the lanes we aren't emitting code
6195     // for. This behavior can be changed by allowing getScalarValue to clone
6196     // the lane zero values for uniforms rather than asserting.
6197     for (Use &U : I->operands())
6198       if (auto *J = dyn_cast<Instruction>(U.get()))
6199         if (isUniformAfterVectorization(J, VF))
6200           return false;
6201 
6202     // Otherwise, we can scalarize the instruction.
6203     return true;
6204   };
6205 
6206   // Compute the expected cost discount from scalarizing the entire expression
6207   // feeding the predicated instruction. We currently only consider expressions
6208   // that are single-use instruction chains.
6209   Worklist.push_back(PredInst);
6210   while (!Worklist.empty()) {
6211     Instruction *I = Worklist.pop_back_val();
6212 
6213     // If we've already analyzed the instruction, there's nothing to do.
6214     if (ScalarCosts.find(I) != ScalarCosts.end())
6215       continue;
6216 
6217     // Compute the cost of the vector instruction. Note that this cost already
6218     // includes the scalarization overhead of the predicated instruction.
6219     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6220 
6221     // Compute the cost of the scalarized instruction. This cost is the cost of
6222     // the instruction as if it wasn't if-converted and instead remained in the
6223     // predicated block. We will scale this cost by block probability after
6224     // computing the scalarization overhead.
6225     InstructionCost ScalarCost =
6226         VF.getFixedValue() *
6227         getInstructionCost(I, ElementCount::getFixed(1)).first;
6228 
6229     // Compute the scalarization overhead of needed insertelement instructions
6230     // and phi nodes.
6231     if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
6232       ScalarCost += TTI.getScalarizationOverhead(
6233           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6234           APInt::getAllOnes(VF.getFixedValue()), true, false);
6235       ScalarCost +=
6236           VF.getFixedValue() *
6237           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6238     }
6239 
6240     // Compute the scalarization overhead of needed extractelement
6241     // instructions. For each of the instruction's operands, if the operand can
6242     // be scalarized, add it to the worklist; otherwise, account for the
6243     // overhead.
6244     for (Use &U : I->operands())
6245       if (auto *J = dyn_cast<Instruction>(U.get())) {
6246         assert(VectorType::isValidElementType(J->getType()) &&
6247                "Instruction has non-scalar type");
6248         if (canBeScalarized(J))
6249           Worklist.push_back(J);
6250         else if (needsExtract(J, VF)) {
6251           ScalarCost += TTI.getScalarizationOverhead(
6252               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6253               APInt::getAllOnes(VF.getFixedValue()), false, true);
6254         }
6255       }
6256 
6257     // Scale the total scalar cost by block probability.
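    // For example, getReciprocalPredBlockProb() currently returns 2 (each
    // predicated block is assumed to execute half the time), so a raw scalar
    // cost of 8 is scaled down to 4 here.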
6258     ScalarCost /= getReciprocalPredBlockProb();
6259 
6260     // Compute the discount. A non-negative discount means the vector version
6261     // of the instruction costs more, and scalarizing would be beneficial.
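    // For example, a VectorCost of 10 against a scaled ScalarCost of 6 adds 4
    // to the discount, in favor of scalarizing.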
6262     Discount += VectorCost - ScalarCost;
6263     ScalarCosts[I] = ScalarCost;
6264   }
6265 
6266   return *Discount.getValue();
6267 }
6268 
6269 LoopVectorizationCostModel::VectorizationCostTy
6270 LoopVectorizationCostModel::expectedCost(
6271     ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
6272   VectorizationCostTy Cost;
6273 
6274   // For each block.
6275   for (BasicBlock *BB : TheLoop->blocks()) {
6276     VectorizationCostTy BlockCost;
6277 
6278     // For each instruction in the old loop.
6279     for (Instruction &I : BB->instructionsWithoutDebug()) {
6280       // Skip ignored values.
6281       if (ValuesToIgnore.count(&I) ||
6282           (VF.isVector() && VecValuesToIgnore.count(&I)))
6283         continue;
6284 
6285       VectorizationCostTy C = getInstructionCost(&I, VF);
6286 
6287       // Check if we should override the cost.
6288       if (C.first.isValid() &&
6289           ForceTargetInstructionCost.getNumOccurrences() > 0)
6290         C.first = InstructionCost(ForceTargetInstructionCost);
6291 
6292       // Keep a list of instructions with invalid costs.
6293       if (Invalid && !C.first.isValid())
6294         Invalid->emplace_back(&I, VF);
6295 
6296       BlockCost.first += C.first;
6297       BlockCost.second |= C.second;
6298       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6299                         << " for VF " << VF << " For instruction: " << I
6300                         << '\n');
6301     }
6302 
6303     // If we are vectorizing a predicated block, it will have been
6304     // if-converted. This means that the block's instructions (aside from
6305     // stores and instructions that may divide by zero) will now be
6306     // unconditionally executed. For the scalar case, we may not always execute
6307     // the predicated block, if it is an if-else block. Thus, scale the block's
6308     // cost by the probability of executing it. blockNeedsPredication from
6309     // Legal is used so as to not include all blocks in tail folded loops.
6310     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6311       BlockCost.first /= getReciprocalPredBlockProb();
6312 
6313     Cost.first += BlockCost.first;
6314     Cost.second |= BlockCost.second;
6315   }
6316 
6317   return Cost;
6318 }
6319 
/// Gets the address access SCEV after verifying that the access pattern is
/// loop invariant except for the induction variable dependence.
6322 ///
6323 /// This SCEV can be sent to the Target in order to estimate the address
6324 /// calculation cost.
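///
/// For example, for a GEP such as
///   %gep = getelementptr inbounds i32, i32* %base, i64 %iv
/// where %base is loop invariant and %iv is an induction variable, the SCEV
/// of the pointer is returned; any other loop-variant index yields nullptr.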
static const SCEV *getAddressAccessSCEV(Value *Ptr,
                                        LoopVectorizationLegality *Legal,
                                        PredicatedScalarEvolution &PSE,
                                        const Loop *TheLoop) {
6331   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6332   if (!Gep)
6333     return nullptr;
6334 
6335   // We are looking for a gep with all loop invariant indices except for one
6336   // which should be an induction variable.
6337   auto SE = PSE.getSE();
6338   unsigned NumOperands = Gep->getNumOperands();
6339   for (unsigned i = 1; i < NumOperands; ++i) {
6340     Value *Opd = Gep->getOperand(i);
6341     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6342         !Legal->isInductionVariable(Opd))
6343       return nullptr;
6344   }
6345 
  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
6347   return PSE.getSCEV(Ptr);
6348 }
6349 
6350 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6351   return Legal->hasStride(I->getOperand(0)) ||
6352          Legal->hasStride(I->getOperand(1));
6353 }
6354 
6355 InstructionCost
6356 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6357                                                         ElementCount VF) {
6358   assert(VF.isVector() &&
6359          "Scalarization cost of instruction implies vectorization.");
6360   if (VF.isScalable())
6361     return InstructionCost::getInvalid();
6362 
6363   Type *ValTy = getLoadStoreType(I);
6364   auto SE = PSE.getSE();
6365 
6366   unsigned AS = getLoadStoreAddressSpace(I);
6367   Value *Ptr = getLoadStorePointerOperand(I);
6368   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6369   // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
6370   //       that it is being called from this specific place.
6371 
  // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
6374   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6375 
6376   // Get the cost of the scalar memory instruction and address computation.
6377   InstructionCost Cost =
6378       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6379 
6380   // Don't pass *I here, since it is scalar but will actually be part of a
6381   // vectorized loop where the user of it is a vectorized instruction.
6382   const Align Alignment = getLoadStoreAlignment(I);
6383   Cost += VF.getKnownMinValue() *
6384           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6385                               AS, TTI::TCK_RecipThroughput);
6386 
6387   // Get the overhead of the extractelement and insertelement instructions
6388   // we might create due to scalarization.
6389   Cost += getScalarizationOverhead(I, VF);
6390 
6391   // If we have a predicated load/store, it will need extra i1 extracts and
6392   // conditional branches, but may not be executed for each vector lane. Scale
6393   // the cost by the probability of executing the predicated block.
6394   if (isPredicatedInst(I, VF)) {
6395     Cost /= getReciprocalPredBlockProb();
6396 
6397     // Add the cost of an i1 extract and a branch
6398     auto *Vec_i1Ty =
6399         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
6400     Cost += TTI.getScalarizationOverhead(
6401         Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
6402         /*Insert=*/false, /*Extract=*/true);
6403     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
6404 
6405     if (useEmulatedMaskMemRefHack(I, VF))
6406       // Artificially setting to a high enough value to practically disable
6407       // vectorization with such operations.
6408       Cost = 3000000;
6409   }
6410 
6411   return Cost;
6412 }
6413 
6414 InstructionCost
6415 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6416                                                     ElementCount VF) {
6417   Type *ValTy = getLoadStoreType(I);
6418   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6419   Value *Ptr = getLoadStorePointerOperand(I);
6420   unsigned AS = getLoadStoreAddressSpace(I);
6421   int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
6422   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6423 
6424   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6425          "Stride should be 1 or -1 for consecutive memory access");
6426   const Align Alignment = getLoadStoreAlignment(I);
6427   InstructionCost Cost = 0;
6428   if (Legal->isMaskRequired(I))
6429     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6430                                       CostKind);
6431   else
6432     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6433                                 CostKind, I);
6434 
6435   bool Reverse = ConsecutiveStride < 0;
6436   if (Reverse)
6437     Cost +=
6438         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6439   return Cost;
6440 }
6441 
6442 InstructionCost
6443 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6444                                                 ElementCount VF) {
6445   assert(Legal->isUniformMemOp(*I));
6446 
6447   Type *ValTy = getLoadStoreType(I);
6448   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6449   const Align Alignment = getLoadStoreAlignment(I);
6450   unsigned AS = getLoadStoreAddressSpace(I);
6451   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6452   if (isa<LoadInst>(I)) {
6453     return TTI.getAddressComputationCost(ValTy) +
6454            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6455                                CostKind) +
6456            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6457   }
6458   StoreInst *SI = cast<StoreInst>(I);
6459 
6460   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
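  // A uniform store of a loop-varying value keeps only the value of the final
  // lane, so it is costed as an extract of element VF - 1 below; storing a
  // loop-invariant value needs no extract at all.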
6461   return TTI.getAddressComputationCost(ValTy) +
6462          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6463                              CostKind) +
6464          (isLoopInvariantStoreValue
6465               ? 0
6466               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6467                                        VF.getKnownMinValue() - 1));
6468 }
6469 
6470 InstructionCost
6471 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6472                                                  ElementCount VF) {
6473   Type *ValTy = getLoadStoreType(I);
6474   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6475   const Align Alignment = getLoadStoreAlignment(I);
6476   const Value *Ptr = getLoadStorePointerOperand(I);
6477 
6478   return TTI.getAddressComputationCost(VectorTy) +
6479          TTI.getGatherScatterOpCost(
6480              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6481              TargetTransformInfo::TCK_RecipThroughput, I);
6482 }
6483 
6484 InstructionCost
6485 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6486                                                    ElementCount VF) {
6487   // TODO: Once we have support for interleaving with scalable vectors
6488   // we can calculate the cost properly here.
6489   if (VF.isScalable())
6490     return InstructionCost::getInvalid();
6491 
6492   Type *ValTy = getLoadStoreType(I);
6493   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6494   unsigned AS = getLoadStoreAddressSpace(I);
6495 
6496   auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Failed to get an interleaved access group.");
6498 
6499   unsigned InterleaveFactor = Group->getFactor();
6500   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6501 
6502   // Holds the indices of existing members in the interleaved group.
6503   SmallVector<unsigned, 4> Indices;
6504   for (unsigned IF = 0; IF < InterleaveFactor; IF++)
6505     if (Group->getMember(IF))
6506       Indices.push_back(IF);
6507 
6508   // Calculate the cost of the whole interleaved group.
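  // A mask for gaps is needed when the group has gaps that cannot be handled
  // by a scalar epilogue, or when a store group does not fill all of its
  // member slots.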
6509   bool UseMaskForGaps =
6510       (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
6511       (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
6512   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6513       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6514       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6515 
6516   if (Group->isReverse()) {
6517     // TODO: Add support for reversed masked interleaved access.
6518     assert(!Legal->isMaskRequired(I) &&
6519            "Reverse masked interleaved access not supported.");
6520     Cost +=
6521         Group->getNumMembers() *
6522         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6523   }
6524   return Cost;
6525 }
6526 
6527 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
6528     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6529   using namespace llvm::PatternMatch;
6530   // Early exit for no inloop reductions
6531   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6532     return None;
6533   auto *VectorTy = cast<VectorType>(Ty);
6534 
  // We are looking for one of the following patterns, choosing the one with
  // the minimal acceptable cost:
6536   //  reduce(mul(ext(A), ext(B))) or
6537   //  reduce(mul(A, B)) or
6538   //  reduce(ext(A)) or
6539   //  reduce(A).
6540   // The basic idea is that we walk down the tree to do that, finding the root
6541   // reduction instruction in InLoopReductionImmediateChains. From there we find
6542   // the pattern of mul/ext and test the cost of the entire pattern vs the cost
6543   // of the components. If the reduction cost is lower then we return it for the
6544   // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return an invalid cost specifying the original cost method
6546   // should be used.
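  //
  // As a rough illustration, reduce(mul(ext(A), ext(B))) corresponds to
  // vectorized IR of the form:
  //   %a = sext <8 x i8> %A to <8 x i32>
  //   %b = sext <8 x i8> %B to <8 x i32>
  //   %m = mul <8 x i32> %a, %b
  //   %r = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %m)
  // which some targets can lower as a single multiply-accumulate reduction
  // that is cheaper than the sum of the costs of its parts.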
6547   Instruction *RetI = I;
6548   if (match(RetI, m_ZExtOrSExt(m_Value()))) {
6549     if (!RetI->hasOneUser())
6550       return None;
6551     RetI = RetI->user_back();
6552   }
6553   if (match(RetI, m_Mul(m_Value(), m_Value())) &&
6554       RetI->user_back()->getOpcode() == Instruction::Add) {
6555     if (!RetI->hasOneUser())
6556       return None;
6557     RetI = RetI->user_back();
6558   }
6559 
6560   // Test if the found instruction is a reduction, and if not return an invalid
6561   // cost specifying the parent to use the original cost modelling.
6562   if (!InLoopReductionImmediateChains.count(RetI))
6563     return None;
6564 
6565   // Find the reduction this chain is a part of and calculate the basic cost of
6566   // the reduction on its own.
6567   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
6568   Instruction *ReductionPhi = LastChain;
6569   while (!isa<PHINode>(ReductionPhi))
6570     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
6571 
6572   const RecurrenceDescriptor &RdxDesc =
6573       Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second;
6574 
6575   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
6576       RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
6577 
6578   // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
6579   // normal fmul instruction to the cost of the fadd reduction.
6580   if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
6581     BaseCost +=
6582         TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
6583 
6584   // If we're using ordered reductions then we can just return the base cost
6585   // here, since getArithmeticReductionCost calculates the full ordered
6586   // reduction cost when FP reassociation is not allowed.
6587   if (useOrderedReductions(RdxDesc))
6588     return BaseCost;
6589 
6590   // Get the operand that was not the reduction chain and match it to one of the
6591   // patterns, returning the better cost if it is found.
6592   Instruction *RedOp = RetI->getOperand(1) == LastChain
6593                            ? dyn_cast<Instruction>(RetI->getOperand(0))
6594                            : dyn_cast<Instruction>(RetI->getOperand(1));
6595 
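  // The pattern costs below are computed at the element type of I's first
  // operand, so recreate VectorTy with that element type.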
6596   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
6597 
6598   Instruction *Op0, *Op1;
6599   if (RedOp &&
6600       match(RedOp,
6601             m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
6602       match(Op0, m_ZExtOrSExt(m_Value())) &&
6603       Op0->getOpcode() == Op1->getOpcode() &&
6604       Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
6605       !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
6606       (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
6607 
6608     // Matched reduce(ext(mul(ext(A), ext(B)))
6609     // Note that the extend opcodes need to all match, or if A==B they will have
6610     // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
6611     // which is equally fine.
6612     bool IsUnsigned = isa<ZExtInst>(Op0);
6613     auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
6614     auto *MulType = VectorType::get(Op0->getType(), VectorTy);
6615 
6616     InstructionCost ExtCost =
6617         TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
6618                              TTI::CastContextHint::None, CostKind, Op0);
6619     InstructionCost MulCost =
6620         TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
6621     InstructionCost Ext2Cost =
6622         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
6623                              TTI::CastContextHint::None, CostKind, RedOp);
6624 
6625     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6626         /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6627         CostKind);
6628 
6629     if (RedCost.isValid() &&
6630         RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
6631       return I == RetI ? RedCost : 0;
6632   } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
6633              !TheLoop->isLoopInvariant(RedOp)) {
6634     // Matched reduce(ext(A))
6635     bool IsUnsigned = isa<ZExtInst>(RedOp);
6636     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
6637     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6638         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6639         CostKind);
6640 
6641     InstructionCost ExtCost =
6642         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
6643                              TTI::CastContextHint::None, CostKind, RedOp);
6644     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
6645       return I == RetI ? RedCost : 0;
6646   } else if (RedOp &&
6647              match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
6648     if (match(Op0, m_ZExtOrSExt(m_Value())) &&
6649         Op0->getOpcode() == Op1->getOpcode() &&
6650         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
6651       bool IsUnsigned = isa<ZExtInst>(Op0);
6652       Type *Op0Ty = Op0->getOperand(0)->getType();
6653       Type *Op1Ty = Op1->getOperand(0)->getType();
6654       Type *LargestOpTy =
6655           Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty
6656                                                                     : Op0Ty;
6657       auto *ExtType = VectorType::get(LargestOpTy, VectorTy);
6658 
6659       // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of
6660       // different sizes. We take the largest type as the ext to reduce, and add
6661       // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))).
6662       InstructionCost ExtCost0 = TTI.getCastInstrCost(
6663           Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy),
6664           TTI::CastContextHint::None, CostKind, Op0);
6665       InstructionCost ExtCost1 = TTI.getCastInstrCost(
6666           Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy),
6667           TTI::CastContextHint::None, CostKind, Op1);
6668       InstructionCost MulCost =
6669           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6670 
6671       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6672           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
6673           CostKind);
6674       InstructionCost ExtraExtCost = 0;
6675       if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) {
6676         Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1;
6677         ExtraExtCost = TTI.getCastInstrCost(
6678             ExtraExtOp->getOpcode(), ExtType,
6679             VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy),
6680             TTI::CastContextHint::None, CostKind, ExtraExtOp);
6681       }
6682 
6683       if (RedCost.isValid() &&
6684           (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost))
6685         return I == RetI ? RedCost : 0;
6686     } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
6687       // Matched reduce(mul())
6688       InstructionCost MulCost =
6689           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
6690 
6691       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
6692           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
6693           CostKind);
6694 
6695       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
6696         return I == RetI ? RedCost : 0;
6697     }
6698   }
6699 
6700   return I == RetI ? Optional<InstructionCost>(BaseCost) : None;
6701 }
6702 
6703 InstructionCost
6704 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
6705                                                      ElementCount VF) {
6706   // Calculate scalar cost only. Vectorization cost should be ready at this
6707   // moment.
6708   if (VF.isScalar()) {
6709     Type *ValTy = getLoadStoreType(I);
6710     const Align Alignment = getLoadStoreAlignment(I);
6711     unsigned AS = getLoadStoreAddressSpace(I);
6712 
6713     return TTI.getAddressComputationCost(ValTy) +
6714            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
6715                                TTI::TCK_RecipThroughput, I);
6716   }
6717   return getWideningCost(I, VF);
6718 }
6719 
6720 LoopVectorizationCostModel::VectorizationCostTy
6721 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6722                                                ElementCount VF) {
6723   // If we know that this instruction will remain uniform, check the cost of
6724   // the scalar version.
6725   if (isUniformAfterVectorization(I, VF))
6726     VF = ElementCount::getFixed(1);
6727 
6728   if (VF.isVector() && isProfitableToScalarize(I, VF))
6729     return VectorizationCostTy(InstsToScalarize[VF][I], false);
6730 
6731   // Forced scalars do not have any scalarization overhead.
6732   auto ForcedScalar = ForcedScalars.find(VF);
6733   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
6734     auto InstSet = ForcedScalar->second;
6735     if (InstSet.count(I))
6736       return VectorizationCostTy(
6737           (getInstructionCost(I, ElementCount::getFixed(1)).first *
6738            VF.getKnownMinValue()),
6739           false);
6740   }
6741 
6742   Type *VectorTy;
6743   InstructionCost C = getInstructionCost(I, VF, VectorTy);
6744 
6745   bool TypeNotScalarized = false;
6746   if (VF.isVector() && VectorTy->isVectorTy()) {
6747     unsigned NumParts = TTI.getNumberOfParts(VectorTy);
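    // If the target would split the type into at least as many parts as the
    // type has elements, widening effectively degenerates to scalarization;
    // zero parts means the type could not be legalized at all.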
6748     if (NumParts)
6749       TypeNotScalarized = NumParts < VF.getKnownMinValue();
6750     else
6751       C = InstructionCost::getInvalid();
6752   }
6753   return VectorizationCostTy(C, TypeNotScalarized);
6754 }
6755 
6756 InstructionCost
6757 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
6758                                                      ElementCount VF) const {
6759 
6760   // There is no mechanism yet to create a scalable scalarization loop,
6761   // so this is currently Invalid.
6762   if (VF.isScalable())
6763     return InstructionCost::getInvalid();
6764 
6765   if (VF.isScalar())
6766     return 0;
6767 
6768   InstructionCost Cost = 0;
6769   Type *RetTy = ToVectorTy(I->getType(), VF);
6770   if (!RetTy->isVoidTy() &&
6771       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
6772     Cost += TTI.getScalarizationOverhead(
6773         cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true,
6774         false);
6775 
6776   // Some targets keep addresses scalar.
6777   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
6778     return Cost;
6779 
6780   // Some targets support efficient element stores.
6781   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
6782     return Cost;
6783 
6784   // Collect operands to consider.
6785   CallInst *CI = dyn_cast<CallInst>(I);
6786   Instruction::op_range Ops = CI ? CI->args() : I->operands();
6787 
6788   // Skip operands that do not require extraction/scalarization and do not incur
6789   // any overhead.
6790   SmallVector<Type *> Tys;
6791   for (auto *V : filterExtractingOperands(Ops, VF))
6792     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
6793   return Cost + TTI.getOperandsScalarizationOverhead(
6794                     filterExtractingOperands(Ops, VF), Tys);
6795 }
6796 
6797 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
6798   if (VF.isScalar())
6799     return;
6800   NumPredStores = 0;
6801   for (BasicBlock *BB : TheLoop->blocks()) {
6802     // For each instruction in the old loop.
6803     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
6805       if (!Ptr)
6806         continue;
6807 
6808       // TODO: We should generate better code and update the cost model for
6809       // predicated uniform stores. Today they are treated as any other
6810       // predicated store (see added test cases in
6811       // invariant-store-vectorization.ll).
6812       if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF))
6813         NumPredStores++;
6814 
6815       if (Legal->isUniformMemOp(I)) {
6816         // TODO: Avoid replicating loads and stores instead of
6817         // relying on instcombine to remove them.
6818         // Load: Scalar load + broadcast
6819         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
6820         InstructionCost Cost;
6821         if (isa<StoreInst>(&I) && VF.isScalable() &&
6822             isLegalGatherOrScatter(&I, VF)) {
6823           Cost = getGatherScatterCost(&I, VF);
6824           setWideningDecision(&I, VF, CM_GatherScatter, Cost);
6825         } else {
6826           assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
6827                  "Cannot yet scalarize uniform stores");
6828           Cost = getUniformMemOpCost(&I, VF);
6829           setWideningDecision(&I, VF, CM_Scalarize, Cost);
6830         }
6831         continue;
6832       }
6833 
6834       // We assume that widening is the best solution when possible.
6835       if (memoryInstructionCanBeWidened(&I, VF)) {
6836         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
6837         int ConsecutiveStride = Legal->isConsecutivePtr(
6838             getLoadStoreType(&I), getLoadStorePointerOperand(&I));
6839         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6840                "Expected consecutive stride.");
6841         InstWidening Decision =
6842             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
6843         setWideningDecision(&I, VF, Decision, Cost);
6844         continue;
6845       }
6846 
6847       // Choose between Interleaving, Gather/Scatter or Scalarization.
6848       InstructionCost InterleaveCost = InstructionCost::getInvalid();
6849       unsigned NumAccesses = 1;
6850       if (isAccessInterleaved(&I)) {
6851         auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
6853 
6854         // Make one decision for the whole group.
6855         if (getWideningDecision(&I, VF) != CM_Unknown)
6856           continue;
6857 
6858         NumAccesses = Group->getNumMembers();
6859         if (interleavedAccessCanBeWidened(&I, VF))
6860           InterleaveCost = getInterleaveGroupCost(&I, VF);
6861       }
6862 
6863       InstructionCost GatherScatterCost =
6864           isLegalGatherOrScatter(&I, VF)
6865               ? getGatherScatterCost(&I, VF) * NumAccesses
6866               : InstructionCost::getInvalid();
6867 
6868       InstructionCost ScalarizationCost =
6869           getMemInstScalarizationCost(&I, VF) * NumAccesses;
6870 
6871       // Choose better solution for the current VF,
6872       // write down this decision and use it during vectorization.
6873       InstructionCost Cost;
6874       InstWidening Decision;
6875       if (InterleaveCost <= GatherScatterCost &&
6876           InterleaveCost < ScalarizationCost) {
6877         Decision = CM_Interleave;
6878         Cost = InterleaveCost;
6879       } else if (GatherScatterCost < ScalarizationCost) {
6880         Decision = CM_GatherScatter;
6881         Cost = GatherScatterCost;
6882       } else {
6883         Decision = CM_Scalarize;
6884         Cost = ScalarizationCost;
6885       }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group also receives the cost,
      // but the cost will actually be assigned to one instruction.
6889       if (auto Group = getInterleavedAccessGroup(&I))
6890         setWideningDecision(Group, VF, Decision, Cost);
6891       else
6892         setWideningDecision(&I, VF, Decision, Cost);
6893     }
6894   }
6895 
6896   // Make sure that any load of address and any other address computation
6897   // remains scalar unless there is gather/scatter support. This avoids
6898   // inevitable extracts into address registers, and also has the benefit of
6899   // activating LSR more, since that pass can't optimize vectorized
6900   // addresses.
6901   if (TTI.prefersVectorizedAddressing())
6902     return;
6903 
6904   // Start with all scalar pointer uses.
6905   SmallPtrSet<Instruction *, 8> AddrDefs;
6906   for (BasicBlock *BB : TheLoop->blocks())
6907     for (Instruction &I : *BB) {
6908       Instruction *PtrDef =
6909         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
6910       if (PtrDef && TheLoop->contains(PtrDef) &&
6911           getWideningDecision(&I, VF) != CM_GatherScatter)
6912         AddrDefs.insert(PtrDef);
6913     }
6914 
6915   // Add all instructions used to generate the addresses.
6916   SmallVector<Instruction *, 4> Worklist;
6917   append_range(Worklist, AddrDefs);
6918   while (!Worklist.empty()) {
6919     Instruction *I = Worklist.pop_back_val();
6920     for (auto &Op : I->operands())
6921       if (auto *InstOp = dyn_cast<Instruction>(Op))
6922         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
6923             AddrDefs.insert(InstOp).second)
6924           Worklist.push_back(InstOp);
6925   }
6926 
6927   for (auto *I : AddrDefs) {
6928     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // cost functions, but since this involves finding out whether the loaded
      // register is involved in an address computation, it is instead changed
      // here when we know this is the case.
6933       InstWidening Decision = getWideningDecision(I, VF);
6934       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
6935         // Scalarize a widened load of address.
6936         setWideningDecision(
6937             I, VF, CM_Scalarize,
6938             (VF.getKnownMinValue() *
6939              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
6940       else if (auto Group = getInterleavedAccessGroup(I)) {
6941         // Scalarize an interleave group of address loads.
6942         for (unsigned I = 0; I < Group->getFactor(); ++I) {
6943           if (Instruction *Member = Group->getMember(I))
6944             setWideningDecision(
6945                 Member, VF, CM_Scalarize,
6946                 (VF.getKnownMinValue() *
6947                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
6948         }
6949       }
6950     } else
6951       // Make sure I gets scalarized and a cost estimate without
6952       // scalarization overhead.
6953       ForcedScalars[VF].insert(I);
6954   }
6955 }
6956 
6957 InstructionCost
6958 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
6959                                                Type *&VectorTy) {
6960   Type *RetTy = I->getType();
6961   if (canTruncateToMinimalBitwidth(I, VF))
6962     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6963   auto SE = PSE.getSE();
6964   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6965 
6966   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
6967                                                 ElementCount VF) -> bool {
6968     if (VF.isScalar())
6969       return true;
6970 
6971     auto Scalarized = InstsToScalarize.find(VF);
6972     assert(Scalarized != InstsToScalarize.end() &&
6973            "VF not yet analyzed for scalarization profitability");
6974     return !Scalarized->second.count(I) &&
6975            llvm::all_of(I->users(), [&](User *U) {
6976              auto *UI = cast<Instruction>(U);
6977              return !Scalarized->second.count(UI);
6978            });
6979   };
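  // The lambda is referenced only in the assert below; this cast silences
  // unused-variable warnings in builds without asserts.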
6980   (void) hasSingleCopyAfterVectorization;
6981 
6982   if (isScalarAfterVectorization(I, VF)) {
6983     // With the exception of GEPs and PHIs, after scalarization there should
6984     // only be one copy of the instruction generated in the loop. This is
6985     // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result,
6987     // it means we don't have to multiply the instruction cost by VF.
6988     assert(I->getOpcode() == Instruction::GetElementPtr ||
6989            I->getOpcode() == Instruction::PHI ||
6990            (I->getOpcode() == Instruction::BitCast &&
6991             I->getType()->isPointerTy()) ||
6992            hasSingleCopyAfterVectorization(I, VF));
6993     VectorTy = RetTy;
6994   } else
6995     VectorTy = ToVectorTy(RetTy, VF);
6996 
6997   // TODO: We need to estimate the cost of intrinsic calls.
6998   switch (I->getOpcode()) {
6999   case Instruction::GetElementPtr:
7000     // We mark this instruction as zero-cost because the cost of GEPs in
7001     // vectorized code depends on whether the corresponding memory instruction
7002     // is scalarized or not. Therefore, we handle GEPs with the memory
7003     // instruction cost.
7004     return 0;
7005   case Instruction::Br: {
7006     // In cases of scalarized and predicated instructions, there will be VF
7007     // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
7009     bool ScalarPredicatedBB = false;
7010     BranchInst *BI = cast<BranchInst>(I);
7011     if (VF.isVector() && BI->isConditional() &&
7012         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7013          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7014       ScalarPredicatedBB = true;
7015 
7016     if (ScalarPredicatedBB) {
      // It is not possible to scalarize a scalable vector with predicated
      // instructions.
7018       if (VF.isScalable())
7019         return InstructionCost::getInvalid();
7020       // Return cost for branches around scalarized and predicated blocks.
7021       auto *Vec_i1Ty =
7022           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7023       return (
7024           TTI.getScalarizationOverhead(
7025               Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
7026           (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
7027     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7028       // The back-edge branch will remain, as will all scalar branches.
7029       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7030     else
7031       // This branch will be eliminated by if-conversion.
7032       return 0;
7033     // Note: We currently assume zero cost for an unconditional branch inside
7034     // a predicated block since it will become a fall-through, although we
7035     // may decide in the future to call TTI for all branches.
7036   }
7037   case Instruction::PHI: {
7038     auto *Phi = cast<PHINode>(I);
7039 
7040     // First-order recurrences are replaced by vector shuffles inside the loop.
7041     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7042     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7043       return TTI.getShuffleCost(
7044           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7045           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7046 
7047     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7048     // converted into select instructions. We require N - 1 selects per phi
7049     // node, where N is the number of incoming values.
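    // For example, a phi merging three incoming values becomes two chained
    // selects.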
7050     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7051       return (Phi->getNumIncomingValues() - 1) *
7052              TTI.getCmpSelInstrCost(
7053                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7054                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7055                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7056 
7057     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7058   }
7059   case Instruction::UDiv:
7060   case Instruction::SDiv:
7061   case Instruction::URem:
7062   case Instruction::SRem:
7063     // If we have a predicated instruction, it may not be executed for each
7064     // vector lane. Get the scalarization cost and scale this amount by the
7065     // probability of executing the predicated block. If the instruction is not
7066     // predicated, we fall through to the next case.
7067     if (VF.isVector() && isScalarWithPredication(I, VF)) {
7068       InstructionCost Cost = 0;
7069 
7070       // These instructions have a non-void type, so account for the phi nodes
7071       // that we will create. This cost is likely to be zero. The phi node
7072       // cost, if any, should be scaled by the block probability because it
7073       // models a copy at the end of each predicated block.
7074       Cost += VF.getKnownMinValue() *
7075               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7076 
7077       // The cost of the non-predicated instruction.
7078       Cost += VF.getKnownMinValue() *
7079               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7080 
7081       // The cost of insertelement and extractelement instructions needed for
7082       // scalarization.
7083       Cost += getScalarizationOverhead(I, VF);
7084 
7085       // Scale the cost by the probability of executing the predicated blocks.
7086       // This assumes the predicated block for each vector lane is equally
7087       // likely.
7088       return Cost / getReciprocalPredBlockProb();
7089     }
7090     LLVM_FALLTHROUGH;
7091   case Instruction::Add:
7092   case Instruction::FAdd:
7093   case Instruction::Sub:
7094   case Instruction::FSub:
7095   case Instruction::Mul:
7096   case Instruction::FMul:
7097   case Instruction::FDiv:
7098   case Instruction::FRem:
7099   case Instruction::Shl:
7100   case Instruction::LShr:
7101   case Instruction::AShr:
7102   case Instruction::And:
7103   case Instruction::Or:
7104   case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go
    // away.
7106     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7107       return 0;
7108 
7109     // Detect reduction patterns
7110     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7111       return *RedCost;
7112 
    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
7115     Value *Op2 = I->getOperand(1);
7116     TargetTransformInfo::OperandValueProperties Op2VP;
7117     TargetTransformInfo::OperandValueKind Op2VK =
7118         TTI.getOperandInfo(Op2, Op2VP);
7119     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7120       Op2VK = TargetTransformInfo::OK_UniformValue;
7121 
7122     SmallVector<const Value *, 4> Operands(I->operand_values());
7123     return TTI.getArithmeticInstrCost(
7124         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7125         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7126   }
7127   case Instruction::FNeg: {
7128     return TTI.getArithmeticInstrCost(
7129         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7130         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7131         TargetTransformInfo::OP_None, I->getOperand(0), I);
7132   }
7133   case Instruction::Select: {
7134     SelectInst *SI = cast<SelectInst>(I);
7135     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7136     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7137 
7138     const Value *Op0, *Op1;
7139     using namespace llvm::PatternMatch;
7140     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7141                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7142       // select x, y, false --> x & y
7143       // select x, true, y --> x | y
7144       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7145       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7146       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7147       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7148       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7149               Op1->getType()->getScalarSizeInBits() == 1);
7150 
7151       SmallVector<const Value *, 2> Operands{Op0, Op1};
7152       return TTI.getArithmeticInstrCost(
7153           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7154           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7155     }
7156 
7157     Type *CondTy = SI->getCondition()->getType();
7158     if (!ScalarCond)
7159       CondTy = VectorType::get(CondTy, VF);
7160 
7161     CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
7162     if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition()))
7163       Pred = Cmp->getPredicate();
7164     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred,
7165                                   CostKind, I);
7166   }
7167   case Instruction::ICmp:
7168   case Instruction::FCmp: {
7169     Type *ValTy = I->getOperand(0)->getType();
7170     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7171     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7172       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7173     VectorTy = ToVectorTy(ValTy, VF);
7174     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7175                                   cast<CmpInst>(I)->getPredicate(), CostKind,
7176                                   I);
7177   }
7178   case Instruction::Store:
7179   case Instruction::Load: {
7180     ElementCount Width = VF;
7181     if (Width.isVector()) {
7182       InstWidening Decision = getWideningDecision(I, Width);
7183       assert(Decision != CM_Unknown &&
7184              "CM decision should be taken at this point");
7185       if (Decision == CM_Scalarize)
7186         Width = ElementCount::getFixed(1);
7187     }
7188     VectorTy = ToVectorTy(getLoadStoreType(I), Width);
7189     return getMemoryInstructionCost(I, VF);
7190   }
7191   case Instruction::BitCast:
7192     if (I->getType()->isPointerTy())
7193       return 0;
7194     LLVM_FALLTHROUGH;
7195   case Instruction::ZExt:
7196   case Instruction::SExt:
7197   case Instruction::FPToUI:
7198   case Instruction::FPToSI:
7199   case Instruction::FPExt:
7200   case Instruction::PtrToInt:
7201   case Instruction::IntToPtr:
7202   case Instruction::SIToFP:
7203   case Instruction::UIToFP:
7204   case Instruction::Trunc:
7205   case Instruction::FPTrunc: {
7206     // Computes the CastContextHint from a Load/Store instruction.
7207     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7208       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7209              "Expected a load or a store!");
7210 
7211       if (VF.isScalar() || !TheLoop->contains(I))
7212         return TTI::CastContextHint::Normal;
7213 
7214       switch (getWideningDecision(I, VF)) {
7215       case LoopVectorizationCostModel::CM_GatherScatter:
7216         return TTI::CastContextHint::GatherScatter;
7217       case LoopVectorizationCostModel::CM_Interleave:
7218         return TTI::CastContextHint::Interleave;
7219       case LoopVectorizationCostModel::CM_Scalarize:
7220       case LoopVectorizationCostModel::CM_Widen:
7221         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7222                                         : TTI::CastContextHint::Normal;
7223       case LoopVectorizationCostModel::CM_Widen_Reverse:
7224         return TTI::CastContextHint::Reversed;
7225       case LoopVectorizationCostModel::CM_Unknown:
7226         llvm_unreachable("Instr did not go through cost modelling?");
7227       }
7228 
7229       llvm_unreachable("Unhandled case!");
7230     };
7231 
7232     unsigned Opcode = I->getOpcode();
7233     TTI::CastContextHint CCH = TTI::CastContextHint::None;
    // For Trunc/FPTrunc, the context is the only user, which must be a
    // StoreInst.
7235     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7236       if (I->hasOneUse())
7237         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7238           CCH = ComputeCCH(Store);
7239     }
    // For Z/Sext and FPExt, the context is the operand, which must be a
    // LoadInst.
7241     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7242              Opcode == Instruction::FPExt) {
7243       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7244         CCH = ComputeCCH(Load);
7245     }
7246 
7247     // We optimize the truncation of induction variables having constant
7248     // integer steps. The cost of these truncations is the same as the scalar
7249     // operation.
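    // For example, "trunc i64 %iv to i32" for an induction with a constant
    // step can be generated directly as a narrower i32 induction, so only the
    // cost of the scalar trunc is returned below.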
7250     if (isOptimizableIVTruncate(I, VF)) {
7251       auto *Trunc = cast<TruncInst>(I);
7252       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7253                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7254     }
7255 
    // Detect reduction patterns.
7257     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7258       return *RedCost;
7259 
7260     Type *SrcScalarTy = I->getOperand(0)->getType();
7261     Type *SrcVecTy =
7262         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7263     if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or turn it
      // into a slightly different cast. For example, if MinBW == 16,
7266       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7267       //
7268       // Calculate the modified src and dest types.
7269       Type *MinVecTy = VectorTy;
7270       if (Opcode == Instruction::Trunc) {
7271         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7272         VectorTy =
7273             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7274       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7275         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7276         VectorTy =
7277             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7278       }
7279     }
7280 
7281     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7282   }
7283   case Instruction::Call: {
7284     if (RecurrenceDescriptor::isFMulAddIntrinsic(I))
7285       if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7286         return *RedCost;
7287     bool NeedToScalarize;
7288     CallInst *CI = cast<CallInst>(I);
7289     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7290     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7291       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7292       return std::min(CallCost, IntrinsicCost);
7293     }
7294     return CallCost;
7295   }
7296   case Instruction::ExtractValue:
7297     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7298   case Instruction::Alloca:
7299     // We cannot easily widen alloca to a scalable alloca, as
7300     // the result would need to be a vector of pointers.
7301     if (VF.isScalable())
7302       return InstructionCost::getInvalid();
7303     LLVM_FALLTHROUGH;
7304   default:
7305     // This opcode is unknown. Assume that it is the same as 'mul'.
7306     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7307   } // end of switch.
7308 }
7309 
7310 char LoopVectorize::ID = 0;
7311 
7312 static const char lv_name[] = "Loop Vectorization";
7313 
7314 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7315 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7316 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7317 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7318 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7319 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7320 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7321 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7322 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7323 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7324 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7325 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7326 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7327 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7328 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7329 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7330 
7331 namespace llvm {
7332 
7333 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7334 
7335 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7336                               bool VectorizeOnlyWhenForced) {
7337   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7338 }
7339 
7340 } // end namespace llvm
7341 
7342 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7343   // Check if the pointer operand of a load or store instruction is
7344   // consecutive.
7345   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7346     return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr);
7347   return false;
7348 }
7349 
7350 void LoopVectorizationCostModel::collectValuesToIgnore() {
7351   // Ignore ephemeral values.
7352   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7353 
  // Find all stores to invariant variables. Since they are going to sink
  // outside the loop we do not need to calculate their cost.
7356   for (BasicBlock *BB : TheLoop->blocks())
7357     for (Instruction &I : *BB) {
7358       StoreInst *SI;
7359       if ((SI = dyn_cast<StoreInst>(&I)) &&
7360           Legal->isInvariantAddressOfReduction(SI->getPointerOperand()))
7361         ValuesToIgnore.insert(&I);
7362     }
7363 
7364   // Ignore type-promoting instructions we identified during reduction
7365   // detection.
7366   for (auto &Reduction : Legal->getReductionVars()) {
7367     const RecurrenceDescriptor &RedDes = Reduction.second;
7368     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7369     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7370   }
7371   // Ignore type-casting instructions we identified during induction
7372   // detection.
7373   for (auto &Induction : Legal->getInductionVars()) {
7374     const InductionDescriptor &IndDes = Induction.second;
7375     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7376     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7377   }
7378 }
7379 
7380 void LoopVectorizationCostModel::collectInLoopReductions() {
7381   for (auto &Reduction : Legal->getReductionVars()) {
7382     PHINode *Phi = Reduction.first;
7383     const RecurrenceDescriptor &RdxDesc = Reduction.second;
7384 
7385     // We don't collect reductions that are type promoted (yet).
7386     if (RdxDesc.getRecurrenceType() != Phi->getType())
7387       continue;
7388 
7389     // If the target would prefer this reduction to happen "in-loop", then we
7390     // want to record it as such.
7391     unsigned Opcode = RdxDesc.getOpcode();
7392     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7393         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7394                                    TargetTransformInfo::ReductionFlags()))
7395       continue;
7396 
7397     // Check that we can correctly put the reductions into the loop, by
7398     // finding the chain of operations that leads from the phi to the loop
7399     // exit value.
7400     SmallVector<Instruction *, 4> ReductionOperations =
7401         RdxDesc.getReductionOpChain(Phi, TheLoop);
7402     bool InLoop = !ReductionOperations.empty();
7403     if (InLoop) {
7404       InLoopReductionChains[Phi] = ReductionOperations;
7405       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7406       Instruction *LastChain = Phi;
7407       for (auto *I : ReductionOperations) {
7408         InLoopReductionImmediateChains[I] = LastChain;
7409         LastChain = I;
7410       }
7411     }
7412     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7413                       << " reduction for phi: " << *Phi << "\n");
7414   }
7415 }
7416 
7417 // TODO: we could return a pair of values that specify the max VF and
7418 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do that yet because VPlan does not have a
// cost model that can choose which plan to execute if more than one is
// generated.
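//
// For example, a target with 256-bit wide vector registers and a widest loop
// type of i32 results in a VPlan VF of 256 / 32 = 8.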
7422 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7423                                  LoopVectorizationCostModel &CM) {
7424   unsigned WidestType;
7425   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7426   return WidestVectorRegBits / WidestType;
7427 }
7428 
7429 VectorizationFactor
7430 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7431   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7432   ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is profitable.
7435   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7436   // the vectorization pipeline.
7437   if (!OrigLoop->isInnermost()) {
7438     // If the user doesn't provide a vectorization factor, determine a
7439     // reasonable one.
7440     if (UserVF.isZero()) {
7441       VF = ElementCount::getFixed(determineVPlanVF(
7442           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7443               .getFixedSize(),
7444           CM));
7445       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7446 
7447       // Make sure we have a VF > 1 for stress testing.
7448       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7449         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7450                           << "overriding computed VF.\n");
7451         VF = ElementCount::getFixed(4);
7452       }
7453     }
7454     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7455     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7456            "VF needs to be a power of two");
7457     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7458                       << "VF " << VF << " to build VPlans.\n");
7459     buildVPlans(VF, VF);
7460 
7461     // For VPlan build stress testing, we bail out after VPlan construction.
7462     if (VPlanBuildStressTest)
7463       return VectorizationFactor::Disabled();
7464 
7465     return {VF, 0 /*Cost*/};
7466   }
7467 
7468   LLVM_DEBUG(
7469       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7470                 "VPlan-native path.\n");
7471   return VectorizationFactor::Disabled();
7472 }
7473 
7474 Optional<VectorizationFactor>
7475 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7476   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7477   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
  if (!MaxFactors) // Cases that should not be vectorized nor interleaved.
7479     return None;
7480 
7481   // Invalidate interleave groups if all blocks of loop will be predicated.
7482   if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
7483       !useMaskedInterleavedAccesses(*TTI)) {
7484     LLVM_DEBUG(
7485         dbgs()
7486         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7487            "which requires masked-interleaved support.\n");
7488     if (CM.InterleaveInfo.invalidateGroups())
7489       // Invalidating interleave groups also requires invalidating all decisions
7490       // based on them, which includes widening decisions and uniform and scalar
7491       // values.
7492       CM.invalidateCostModelingDecisions();
7493   }
7494 
7495   ElementCount MaxUserVF =
7496       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
7497   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
7498   if (!UserVF.isZero() && UserVFIsLegal) {
7499     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
7500            "VF needs to be a power of two");
7501     // Collect the instructions (and their associated costs) that will be more
7502     // profitable to scalarize.
7503     if (CM.selectUserVectorizationFactor(UserVF)) {
7504       LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
7505       CM.collectInLoopReductions();
7506       buildVPlansWithVPRecipes(UserVF, UserVF);
7507       LLVM_DEBUG(printPlans(dbgs()));
7508       return {{UserVF, 0}};
7509     } else
7510       reportVectorizationInfo("UserVF ignored because of invalid costs.",
7511                               "InvalidCost", ORE, OrigLoop);
7512   }
7513 
7514   // Populate the set of Vectorization Factor Candidates.
7515   ElementCountSet VFCandidates;
7516   for (auto VF = ElementCount::getFixed(1);
7517        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
7518     VFCandidates.insert(VF);
7519   for (auto VF = ElementCount::getScalable(1);
7520        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
7521     VFCandidates.insert(VF);
7522 
7523   for (const auto &VF : VFCandidates) {
7524     // Collect Uniform and Scalar instructions after vectorization with VF.
7525     CM.collectUniformsAndScalars(VF);
7526 
7527     // Collect the instructions (and their associated costs) that will be more
7528     // profitable to scalarize.
7529     if (VF.isVector())
7530       CM.collectInstsToScalarize(VF);
7531   }
7532 
7533   CM.collectInLoopReductions();
7534   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
7535   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
7536 
7537   LLVM_DEBUG(printPlans(dbgs()));
7538   if (!MaxFactors.hasVector())
7539     return VectorizationFactor::Disabled();
7540 
7541   // Select the optimal vectorization factor.
7542   auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);
7543 
7544   // Check if it is profitable to vectorize with runtime checks.
7545   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
7546   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
7547     bool PragmaThresholdReached =
7548         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
7549     bool ThresholdReached =
7550         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
7551     if ((ThresholdReached && !Hints.allowReordering()) ||
7552         PragmaThresholdReached) {
7553       ORE->emit([&]() {
7554         return OptimizationRemarkAnalysisAliasing(
7555                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
7556                    OrigLoop->getHeader())
7557                << "loop not vectorized: cannot prove it is safe to reorder "
7558                   "memory operations";
7559       });
7560       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
7561       Hints.emitRemarkWithHints();
7562       return VectorizationFactor::Disabled();
7563     }
7564   }
7565   return SelectedVF;
7566 }
7567 
7568 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
7569   assert(count_if(VPlans,
7570                   [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7571              1 &&
7572          "Best VF has not a single VPlan.");
7573 
7574   for (const VPlanPtr &Plan : VPlans) {
7575     if (Plan->hasVF(VF))
7576       return *Plan.get();
7577   }
7578   llvm_unreachable("No plan found!");
7579 }
7580 
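/// Add metadata disabling runtime unrolling to \p L, unless the loop already
/// carries llvm.loop.unroll.disable metadata. The appended operand looks like:
///   !0 = distinct !{!0, ..., !1}
///   !1 = !{!"llvm.loop.unroll.runtime.disable"}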
7581 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7582   SmallVector<Metadata *, 4> MDs;
7583   // Reserve first location for self reference to the LoopID metadata node.
7584   MDs.push_back(nullptr);
7585   bool IsUnrollMetadata = false;
7586   MDNode *LoopID = L->getLoopID();
7587   if (LoopID) {
7588     // First find existing loop unrolling disable metadata.
7589     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7590       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7591       if (MD) {
7592         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata |=
            S && S->getString().startswith("llvm.loop.unroll.disable");
7595       }
7596       MDs.push_back(LoopID->getOperand(i));
7597     }
7598   }
7599 
7600   if (!IsUnrollMetadata) {
7601     // Add runtime unroll disable metadata.
7602     LLVMContext &Context = L->getHeader()->getContext();
7603     SmallVector<Metadata *, 1> DisableOperands;
7604     DisableOperands.push_back(
7605         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7606     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7607     MDs.push_back(DisableNode);
7608     MDNode *NewLoopID = MDNode::get(Context, MDs);
7609     // Set operand 0 to refer to the loop id itself.
7610     NewLoopID->replaceOperandWith(0, NewLoopID);
7611     L->setLoopID(NewLoopID);
7612   }
7613 }
7614 
7615 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
7616                                            VPlan &BestVPlan,
7617                                            InnerLoopVectorizer &ILV,
7618                                            DominatorTree *DT) {
  LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF
                    << ", UF=" << BestUF << '\n');
7621 
7622   // Perform the actual loop transformation.
7623 
7624   // 1. Set up the skeleton for vectorization, including vector pre-header and
7625   // middle block. The vector loop is created during VPlan execution.
7626   VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
7627   Value *CanonicalIVStartValue;
7628   std::tie(State.CFG.PrevBB, CanonicalIVStartValue) =
7629       ILV.createVectorizedLoopSkeleton();
7630   ILV.collectPoisonGeneratingRecipes(State);
7631 
7632   ILV.printDebugTracesAtStart();
7633 
7634   //===------------------------------------------------===//
7635   //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
7639   //
7640   //===------------------------------------------------===//
7641 
7642   // 2. Copy and widen instructions from the old loop into the new loop.
7643   BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr),
7644                              ILV.getOrCreateVectorTripCount(nullptr),
7645                              CanonicalIVStartValue, State);
7646   BestVPlan.execute(&State);
7647 
7648   // Keep all loop hints from the original loop on the vector loop (we'll
7649   // replace the vectorizer-specific hints below).
7650   MDNode *OrigLoopID = OrigLoop->getLoopID();
7651 
7652   Optional<MDNode *> VectorizedLoopID =
7653       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7654                                       LLVMLoopVectorizeFollowupVectorized});
7655 
7656   VPBasicBlock *HeaderVPBB =
7657       BestVPlan.getVectorLoopRegion()->getEntryBasicBlock();
7658   Loop *L = LI->getLoopFor(State.CFG.VPBB2IRBB[HeaderVPBB]);
7659   if (VectorizedLoopID.hasValue())
7660     L->setLoopID(VectorizedLoopID.getValue());
7661   else {
7662     // Keep all loop hints from the original loop on the vector loop (we'll
7663     // replace the vectorizer-specific hints below).
7664     if (MDNode *LID = OrigLoop->getLoopID())
7665       L->setLoopID(LID);
7666 
7667     LoopVectorizeHints Hints(L, true, *ORE);
7668     Hints.setAlreadyVectorized();
7669   }
7670   // Disable runtime unrolling when vectorizing the epilogue loop.
7671   if (CanonicalIVStartValue)
7672     AddRuntimeUnrollDisableMetaData(L);
7673 
7674   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7675   //    predication, updating analyses.
7676   ILV.fixVectorizedLoop(State, BestVPlan);
7677 
7678   ILV.printDebugTracesAtEnd();
7679 }
7680 
7681 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
7682 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
7683   for (const auto &Plan : VPlans)
7684     if (PrintVPlansInDotFormat)
7685       Plan->printDOT(O);
7686     else
7687       Plan->print(O);
7688 }
7689 #endif
7690 
7691 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7692     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7693 
  // We create new control-flow for the vectorized loop, so each original exit
  // condition will be dead after vectorization if it is only used by the
  // terminator.
7697   SmallVector<BasicBlock*> ExitingBlocks;
7698   OrigLoop->getExitingBlocks(ExitingBlocks);
7699   for (auto *BB : ExitingBlocks) {
7700     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
7701     if (!Cmp || !Cmp->hasOneUse())
7702       continue;
7703 
7704     // TODO: we should introduce a getUniqueExitingBlocks on Loop
7705     if (!DeadInstructions.insert(Cmp).second)
7706       continue;
7707 
    // An operand of the icmp is often a dead trunc, used by IndUpdate.
7709     // TODO: can recurse through operands in general
7710     for (Value *Op : Cmp->operands()) {
7711       if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
7713     }
7714   }
7715 
7716   // We create new "steps" for induction variable updates to which the original
7717   // induction variables map. An original update instruction will be dead if
7718   // all its users except the induction variable are dead.
7719   auto *Latch = OrigLoop->getLoopLatch();
7720   for (auto &Induction : Legal->getInductionVars()) {
7721     PHINode *Ind = Induction.first;
7722     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
7723 
7724     // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
7726     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
7727       continue;
7728 
7729     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
7730           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
7731         }))
7732       DeadInstructions.insert(IndUpdate);
7733   }
7734 }
7735 
7736 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
7737 
7738 //===--------------------------------------------------------------------===//
7739 // EpilogueVectorizerMainLoop
7740 //===--------------------------------------------------------------------===//
7741 
7742 /// This function is partially responsible for generating the control flow
7743 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7744 std::pair<BasicBlock *, Value *>
7745 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
7746   MDNode *OrigLoopID = OrigLoop->getLoopID();
7747 
7748   // Workaround!  Compute the trip count of the original loop and cache it
7749   // before we start modifying the CFG.  This code has a systemic problem
7750   // wherein it tries to run analysis over partially constructed IR; this is
7751   // wrong, and not simply for SCEV.  The trip count of the original loop
7752   // simply happens to be prone to hitting this in practice.  In theory, we
7753   // can hit the same issue for any SCEV, or ValueTracking query done during
7754   // mutation.  See PR49900.
7755   getOrCreateTripCount(OrigLoop->getLoopPreheader());
7756   createVectorLoopSkeleton("");
7757 
7758   // Generate the code to check the minimum iteration count of the vector
7759   // epilogue (see below).
7760   EPI.EpilogueIterationCountCheck =
7761       emitMinimumIterationCountCheck(LoopScalarPreHeader, true);
7762   EPI.EpilogueIterationCountCheck->setName("iter.check");
7763 
7764   // Generate the code to check any assumptions that we've made for SCEV
7765   // expressions.
7766   EPI.SCEVSafetyCheck = emitSCEVChecks(LoopScalarPreHeader);
7767 
7768   // Generate the code that checks at runtime if arrays overlap. We put the
7769   // checks into a separate block to make the more common case of few elements
7770   // faster.
7771   EPI.MemSafetyCheck = emitMemRuntimeChecks(LoopScalarPreHeader);
7772 
7773   // Generate the iteration count check for the main loop, *after* the check
7774   // for the epilogue loop, so that the path-length is shorter for the case
7775   // that goes directly through the vector epilogue. The longer-path length for
7776   // the main loop is compensated for, by the gain from vectorizing the larger
7777   // trip count. Note: the branch will get updated later on when we vectorize
7778   // the epilogue.
7779   EPI.MainLoopIterationCountCheck =
7780       emitMinimumIterationCountCheck(LoopScalarPreHeader, false);
7781 
7782   // Generate the induction variable.
7783   EPI.VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader);
7784 
7785   // Skip induction resume value creation here because they will be created in
7786   // the second pass. If we created them here, they wouldn't be used anyway,
7787   // because the vplan in the second pass still contains the inductions from the
7788   // original loop.
7789 
7790   return {completeLoopSkeleton(OrigLoopID), nullptr};
7791 }
7792 
7793 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
7794   LLVM_DEBUG({
7795     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
7796            << "Main Loop VF:" << EPI.MainLoopVF
7797            << ", Main Loop UF:" << EPI.MainLoopUF
7798            << ", Epilogue Loop VF:" << EPI.EpilogueVF
7799            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7800   });
7801 }
7802 
7803 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
7804   DEBUG_WITH_TYPE(VerboseDebug, {
7805     dbgs() << "intermediate fn:\n"
7806            << *OrigLoop->getHeader()->getParent() << "\n";
7807   });
7808 }
7809 
7810 BasicBlock *
7811 EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(BasicBlock *Bypass,
7812                                                            bool ForEpilogue) {
7813   assert(Bypass && "Expected valid bypass basic block.");
7814   ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF;
7815   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
7816   Value *Count = getOrCreateTripCount(LoopVectorPreHeader);
7817   // Reuse existing vector loop preheader for TC checks.
7818   // Note that new preheader block is generated for vector loop.
7819   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
7820   IRBuilder<> Builder(TCCheckBlock->getTerminator());
7821 
7822   // Generate code to check if the loop's trip count is less than VF * UF of the
7823   // main vector loop.
7824   auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ?
7825       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
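  // For example, with a fixed VFactor of 8 and UFactor of 2 this emits
  // "icmp ult i64 %count, 16" (assuming an i64 trip count); ULE is used
  // instead when a scalar epilogue is required, so that at least one scalar
  // iteration always remains.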
7826 
7827   Value *CheckMinIters = Builder.CreateICmp(
7828       P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor),
7829       "min.iters.check");
7830 
7831   if (!ForEpilogue)
7832     TCCheckBlock->setName("vector.main.loop.iter.check");
7833 
7834   // Create new preheader for vector loop.
7835   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
7836                                    DT, LI, nullptr, "vector.ph");
7837 
7838   if (ForEpilogue) {
7839     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
7840                                  DT->getNode(Bypass)->getIDom()) &&
7841            "TC check is expected to dominate Bypass");
7842 
7843     // Update dominator for Bypass & LoopExit.
7844     DT->changeImmediateDominator(Bypass, TCCheckBlock);
7845     if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
7846       // For loops with multiple exits, there's no edge from the middle block
7847       // to exit blocks (as the epilogue must run) and thus no need to update
7848       // the immediate dominator of the exit blocks.
7849       DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
7850 
7851     LoopBypassBlocks.push_back(TCCheckBlock);
7852 
7853     // Save the trip count so we don't have to regenerate it in the
7854     // vec.epilog.iter.check. This is safe to do because the trip count
7855     // generated here dominates the vector epilog iter check.
7856     EPI.TripCount = Count;
7857   }
7858 
7859   ReplaceInstWithInst(
7860       TCCheckBlock->getTerminator(),
7861       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
7862 
7863   return TCCheckBlock;
7864 }
7865 
7866 //===--------------------------------------------------------------------===//
7867 // EpilogueVectorizerEpilogueLoop
7868 //===--------------------------------------------------------------------===//
7869 
7870 /// This function is partially responsible for generating the control flow
7871 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
7872 std::pair<BasicBlock *, Value *>
7873 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
7874   MDNode *OrigLoopID = OrigLoop->getLoopID();
7875   createVectorLoopSkeleton("vec.epilog.");
7876 
  // Now, compare the remaining count and, if there aren't enough iterations to
  // execute the vectorized epilogue, skip to the scalar part.
7879   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
7880   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
7881   LoopVectorPreHeader =
7882       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
7883                  LI, nullptr, "vec.epilog.ph");
7884   emitMinimumVectorEpilogueIterCountCheck(LoopScalarPreHeader,
7885                                           VecEpilogueIterationCountCheck);
7886 
7887   // Adjust the control flow taking the state info from the main loop
7888   // vectorization into account.
7889   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
7890          "expected this to be saved from the previous pass.");
7891   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
7892       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
7893 
7894   DT->changeImmediateDominator(LoopVectorPreHeader,
7895                                EPI.MainLoopIterationCountCheck);
7896 
7897   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
7898       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
7899 
7900   if (EPI.SCEVSafetyCheck)
7901     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
7902         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
7903   if (EPI.MemSafetyCheck)
7904     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
7905         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
7906 
7907   DT->changeImmediateDominator(
7908       VecEpilogueIterationCountCheck,
7909       VecEpilogueIterationCountCheck->getSinglePredecessor());
7910 
7911   DT->changeImmediateDominator(LoopScalarPreHeader,
7912                                EPI.EpilogueIterationCountCheck);
7913   if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
7914     // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
7916     // dominator of the exit blocks.
7917     DT->changeImmediateDominator(LoopExitBlock,
7918                                  EPI.EpilogueIterationCountCheck);
7919 
7920   // Keep track of bypass blocks, as they feed start values to the induction
7921   // phis in the scalar loop preheader.
7922   if (EPI.SCEVSafetyCheck)
7923     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
7924   if (EPI.MemSafetyCheck)
7925     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
7926   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
7927 
7928   // The vec.epilog.iter.check block may contain Phi nodes from reductions which
7929   // merge control-flow from the latch block and the middle block. Update the
7930   // incoming values here and move the Phi into the preheader.
7931   SmallVector<PHINode *, 4> PhisInBlock;
7932   for (PHINode &Phi : VecEpilogueIterationCountCheck->phis())
7933     PhisInBlock.push_back(&Phi);
7934 
7935   for (PHINode *Phi : PhisInBlock) {
7936     Phi->replaceIncomingBlockWith(
7937         VecEpilogueIterationCountCheck->getSinglePredecessor(),
7938         VecEpilogueIterationCountCheck);
7939     Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck);
7940     if (EPI.SCEVSafetyCheck)
7941       Phi->removeIncomingValue(EPI.SCEVSafetyCheck);
7942     if (EPI.MemSafetyCheck)
7943       Phi->removeIncomingValue(EPI.MemSafetyCheck);
7944     Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI());
7945   }
7946 
7947   // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
7949   Type *IdxTy = Legal->getWidestInductionType();
7950   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
7951                                          LoopVectorPreHeader->getFirstNonPHI());
7952   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
7953   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
7954                            EPI.MainLoopIterationCountCheck);
7955 
7956   // Generate induction resume values. These variables save the new starting
7957   // indexes for the scalar loop. They are used to test if there are any tail
7958   // iterations left once the vector loop has completed.
7959   // Note that when the vectorized epilogue is skipped due to iteration count
7960   // check, then the resume value for the induction variable comes from
7961   // the trip count of the main vector loop, hence passing the AdditionalBypass
7962   // argument.
7963   createInductionResumeValues({VecEpilogueIterationCountCheck,
7964                                EPI.VectorTripCount} /* AdditionalBypass */);
7965 
7966   return {completeLoopSkeleton(OrigLoopID), EPResumeVal};
7967 }
7968 
7969 BasicBlock *
7970 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
7971     BasicBlock *Bypass, BasicBlock *Insert) {
7972 
7973   assert(EPI.TripCount &&
7974          "Expected trip count to have been safed in the first pass.");
7975   assert(
7976       (!isa<Instruction>(EPI.TripCount) ||
7977        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
7978       "saved trip count does not dominate insertion point.");
7979   Value *TC = EPI.TripCount;
7980   IRBuilder<> Builder(Insert->getTerminator());
7981   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
7982 
7983   // Generate code to check if the loop's trip count is less than VF * UF of the
7984   // vector epilogue loop.
7985   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
7986       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
7987 
7988   Value *CheckMinIters =
7989       Builder.CreateICmp(P, Count,
7990                          createStepForVF(Builder, Count->getType(),
7991                                          EPI.EpilogueVF, EPI.EpilogueUF),
7992                          "min.epilog.iters.check");
7993 
7994   ReplaceInstWithInst(
7995       Insert->getTerminator(),
7996       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
7997 
7998   LoopBypassBlocks.push_back(Insert);
7999   return Insert;
8000 }
8001 
8002 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8003   LLVM_DEBUG({
8004     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8005            << "Epilogue Loop VF:" << EPI.EpilogueVF
8006            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8007   });
8008 }
8009 
8010 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8011   DEBUG_WITH_TYPE(VerboseDebug, {
8012     dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
8013   });
8014 }
8015 
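/// Evaluate \p Predicate on the VFs in \p Range, clamping Range.End down to
/// the first power-of-two multiple of Range.Start at which the result differs
/// from Predicate(Range.Start), and return Predicate(Range.Start). For
/// example, for Range = [2, 16) with the predicate true at VF 2 and 4 but
/// false at VF 8, the range is clamped to [2, 8) and true is returned.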
8016 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8017     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8018   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8019   bool PredicateAtRangeStart = Predicate(Range.Start);
8020 
8021   for (ElementCount TmpVF = Range.Start * 2;
8022        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8023     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8024       Range.End = TmpVF;
8025       break;
8026     }
8027 
8028   return PredicateAtRangeStart;
8029 }
8030 
/// Build VPlans for the full range of feasible VFs = {\p MinVF, 2 * \p MinVF,
/// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
/// of VFs starting at a given VF and extending it as much as possible. Each
8034 /// vectorization decision can potentially shorten this sub-range during
8035 /// buildVPlan().
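///
/// For example, with MinVF = 2 and MaxVF = 16, this might build one VPlan for
/// the sub-range {2} and another for {4, 8, 16}, if some recipe decision
/// changes between VF 2 and VF 4 but none changes afterwards.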
8036 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8037                                            ElementCount MaxVF) {
8038   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8039   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8040     VFRange SubRange = {VF, MaxVFPlusOne};
8041     VPlans.push_back(buildVPlan(SubRange));
8042     VF = SubRange.End;
8043   }
8044 }
8045 
8046 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8047                                          VPlanPtr &Plan) {
8048   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8049 
8050   // Look for cached value.
8051   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8052   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8053   if (ECEntryIt != EdgeMaskCache.end())
8054     return ECEntryIt->second;
8055 
8056   VPValue *SrcMask = createBlockInMask(Src, Plan);
8057 
8058   // The terminator has to be a branch inst!
8059   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8060   assert(BI && "Unexpected terminator found");
8061 
8062   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8063     return EdgeMaskCache[Edge] = SrcMask;
8064 
8065   // If source is an exiting block, we know the exit edge is dynamically dead
8066   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8067   // adding uses of an otherwise potentially dead instruction.
8068   if (OrigLoop->isLoopExiting(Src))
8069     return EdgeMaskCache[Edge] = SrcMask;
8070 
8071   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8072   assert(EdgeMask && "No Edge Mask found for condition");
8073 
8074   if (BI->getSuccessor(0) != Dst)
8075     EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc());
8076 
8077   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8078     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8079     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
    // The select version does not introduce new UB if SrcMask is false and
    // EdgeMask is poison. Using 'and' instead would make the result poison.
8082     VPValue *False = Plan->getOrAddVPValue(
8083         ConstantInt::getFalse(BI->getCondition()->getType()));
8084     EdgeMask =
8085         Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc());
8086   }
8087 
8088   return EdgeMaskCache[Edge] = EdgeMask;
8089 }
8090 
8091 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8092   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8093 
8094   // Look for cached value.
8095   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8096   if (BCEntryIt != BlockMaskCache.end())
8097     return BCEntryIt->second;
8098 
8099   // All-one mask is modelled as no-mask following the convention for masked
8100   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8101   VPValue *BlockMask = nullptr;
8102 
8103   if (OrigLoop->getHeader() == BB) {
8104     if (!CM.blockNeedsPredicationForAnyReason(BB))
8105       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8106 
8107     // Introduce the early-exit compare IV <= BTC to form header block mask.
8108     // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by
8109     // constructing the desired canonical IV in the header block as its first
    // non-phi instruction.
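    // For example, with a trip count of 10 and VF 4, the last iteration
    // compares IV values {8, 9, 10, 11} against BTC 9 and yields the mask
    // <1, 1, 0, 0>.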
8111     assert(CM.foldTailByMasking() && "must fold the tail");
8112     VPBasicBlock *HeaderVPBB =
8113         Plan->getVectorLoopRegion()->getEntryBasicBlock();
8114     auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi();
8115     auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV());
8116     HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi());
8117 
8118     VPBuilder::InsertPointGuard Guard(Builder);
8119     Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint);
8120     if (CM.TTI.emitGetActiveLaneMask()) {
8121       VPValue *TC = Plan->getOrCreateTripCount();
8122       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC});
8123     } else {
8124       VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8125       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8126     }
8127     return BlockMaskCache[BB] = BlockMask;
8128   }
8129 
8130   // This is the block mask. We OR all incoming edges.
8131   for (auto *Predecessor : predecessors(BB)) {
8132     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8133     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8134       return BlockMaskCache[BB] = EdgeMask;
8135 
8136     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8137       BlockMask = EdgeMask;
8138       continue;
8139     }
8140 
8141     BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
8142   }
8143 
8144   return BlockMaskCache[BB] = BlockMask;
8145 }
8146 
8147 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8148                                                 ArrayRef<VPValue *> Operands,
8149                                                 VFRange &Range,
8150                                                 VPlanPtr &Plan) {
8151   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8152          "Must be called with either a load or store");
8153 
8154   auto willWiden = [&](ElementCount VF) -> bool {
8155     if (VF.isScalar())
8156       return false;
8157     LoopVectorizationCostModel::InstWidening Decision =
8158         CM.getWideningDecision(I, VF);
8159     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8160            "CM decision should be taken at this point.");
8161     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8162       return true;
8163     if (CM.isScalarAfterVectorization(I, VF) ||
8164         CM.isProfitableToScalarize(I, VF))
8165       return false;
8166     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8167   };
8168 
8169   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8170     return nullptr;
8171 
8172   VPValue *Mask = nullptr;
8173   if (Legal->isMaskRequired(I))
8174     Mask = createBlockInMask(I->getParent(), Plan);
8175 
8176   // Determine if the pointer operand of the access is either consecutive or
8177   // reverse consecutive.
8178   LoopVectorizationCostModel::InstWidening Decision =
8179       CM.getWideningDecision(I, Range.Start);
8180   bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
8181   bool Consecutive =
8182       Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
8183 
8184   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8185     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask,
8186                                               Consecutive, Reverse);
8187 
8188   StoreInst *Store = cast<StoreInst>(I);
8189   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8190                                             Mask, Consecutive, Reverse);
8191 }
8192 
/// Creates a VPWidenIntOrFpInductionRecipe for \p Phi. If needed, it will also
8194 /// insert a recipe to expand the step for the induction recipe.
8195 static VPWidenIntOrFpInductionRecipe *createWidenInductionRecipes(
8196     PHINode *Phi, Instruction *PhiOrTrunc, VPValue *Start,
8197     const InductionDescriptor &IndDesc, LoopVectorizationCostModel &CM,
8198     VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop, VFRange &Range) {
8199   // Returns true if an instruction \p I should be scalarized instead of
8200   // vectorized for the chosen vectorization factor.
8201   auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) {
8202     return CM.isScalarAfterVectorization(I, VF) ||
8203            CM.isProfitableToScalarize(I, VF);
8204   };
8205 
8206   bool NeedsScalarIV = LoopVectorizationPlanner::getDecisionAndClampRange(
8207       [&](ElementCount VF) {
8208         // Returns true if we should generate a scalar version of \p IV.
8209         if (ShouldScalarizeInstruction(PhiOrTrunc, VF))
8210           return true;
8211         auto isScalarInst = [&](User *U) -> bool {
8212           auto *I = cast<Instruction>(U);
8213           return OrigLoop.contains(I) && ShouldScalarizeInstruction(I, VF);
8214         };
8215         return any_of(PhiOrTrunc->users(), isScalarInst);
8216       },
8217       Range);
8218   bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange(
8219       [&](ElementCount VF) {
8220         return ShouldScalarizeInstruction(PhiOrTrunc, VF);
8221       },
8222       Range);
8223   assert(IndDesc.getStartValue() ==
8224          Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader()));
8225   assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) &&
8226          "step must be loop invariant");
8227 
8228   VPValue *Step =
8229       vputils::getOrCreateVPValueForSCEVExpr(Plan, IndDesc.getStep(), SE);
8230   if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) {
8231     return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, IndDesc, TruncI,
8232                                              NeedsScalarIV, !NeedsScalarIVOnly);
8233   }
8234   assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here");
8235   return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, IndDesc,
8236                                            NeedsScalarIV, !NeedsScalarIVOnly);
8237 }
8238 
8239 VPRecipeBase *VPRecipeBuilder::tryToOptimizeInductionPHI(
8240     PHINode *Phi, ArrayRef<VPValue *> Operands, VPlan &Plan, VFRange &Range) {
8241 
8242   // Check if this is an integer or fp induction. If so, build the recipe that
8243   // produces its scalar and vector values.
8244   if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi))
8245     return createWidenInductionRecipes(Phi, Phi, Operands[0], *II, CM, Plan,
8246                                        *PSE.getSE(), *OrigLoop, Range);
8247 
8248   // Check if this is pointer induction. If so, build the recipe for it.
8249   if (auto *II = Legal->getPointerInductionDescriptor(Phi))
8250     return new VPWidenPointerInductionRecipe(Phi, Operands[0], *II,
8251                                              *PSE.getSE());
8252   return nullptr;
8253 }
8254 
8255 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8256     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, VPlan &Plan) {
8257   // Optimize the special case where the source is a constant integer
8258   // induction variable. Notice that we can only optimize the 'trunc' case
8259   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8260   // (c) other casts depend on pointer size.
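  //
  // For example, "%t = trunc i64 %iv to i32" for an induction %iv with a
  // constant step can instead generate the induction directly in i32, using
  // the truncated start and step values.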
8261 
8262   // Determine whether \p K is a truncation based on an induction variable that
8263   // can be optimized.
8264   auto isOptimizableIVTruncate =
8265       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8266     return [=](ElementCount VF) -> bool {
8267       return CM.isOptimizableIVTruncate(K, VF);
8268     };
8269   };
8270 
8271   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8272           isOptimizableIVTruncate(I), Range)) {
8273 
8274     auto *Phi = cast<PHINode>(I->getOperand(0));
8275     const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
8276     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8277     return createWidenInductionRecipes(Phi, I, Start, II, CM, Plan,
8278                                        *PSE.getSE(), *OrigLoop, Range);
8279   }
8280   return nullptr;
8281 }
8282 
8283 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8284                                                 ArrayRef<VPValue *> Operands,
8285                                                 VPlanPtr &Plan) {
8286   // If all incoming values are equal, the incoming VPValue can be used directly
8287   // instead of creating a new VPBlendRecipe.
8288   VPValue *FirstIncoming = Operands[0];
8289   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8290         return FirstIncoming == Inc;
8291       })) {
8292     return Operands[0];
8293   }
8294 
8295   unsigned NumIncoming = Phi->getNumIncomingValues();
8296   // For in-loop reductions, we do not need to create an additional select.
8297   VPValue *InLoopVal = nullptr;
8298   for (unsigned In = 0; In < NumIncoming; In++) {
8299     PHINode *PhiOp =
8300         dyn_cast_or_null<PHINode>(Operands[In]->getUnderlyingValue());
8301     if (PhiOp && CM.isInLoopReduction(PhiOp)) {
8302       assert(!InLoopVal && "Found more than one in-loop reduction!");
8303       InLoopVal = Operands[In];
8304     }
8305   }
8306 
8307   assert((!InLoopVal || NumIncoming == 2) &&
8308          "Found an in-loop reduction for PHI with unexpected number of "
8309          "incoming values");
8310   if (InLoopVal)
8311     return Operands[Operands[0] == InLoopVal ? 1 : 0];
8312 
8313   // We know that all PHIs in non-header blocks are converted into selects, so
8314   // we don't have to worry about the insertion order and we can just use the
8315   // builder. At this point we generate the predication tree. There may be
8316   // duplications since this is a simple recursive scan, but future
8317   // optimizations will clean it up.
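  // For example, a phi with incoming values %a from B1 and %b from B2 becomes
  // a VPBlendRecipe with operands {%a, mask(B1->BB), %b, mask(B2->BB)}.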
8318   SmallVector<VPValue *, 2> OperandsWithMask;
8319 
8320   for (unsigned In = 0; In < NumIncoming; In++) {
8321     VPValue *EdgeMask =
8322       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8323     assert((EdgeMask || NumIncoming == 1) &&
8324            "Multiple predecessors with one having a full mask");
8325     OperandsWithMask.push_back(Operands[In]);
8326     if (EdgeMask)
8327       OperandsWithMask.push_back(EdgeMask);
8328   }
8329   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8330 }
8331 
8332 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8333                                                    ArrayRef<VPValue *> Operands,
8334                                                    VFRange &Range) const {
8335 
8336   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8337       [this, CI](ElementCount VF) {
8338         return CM.isScalarWithPredication(CI, VF);
8339       },
8340       Range);
8341 
8342   if (IsPredicated)
8343     return nullptr;
8344 
8345   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8346   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8347              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8348              ID == Intrinsic::pseudoprobe ||
8349              ID == Intrinsic::experimental_noalias_scope_decl))
8350     return nullptr;
8351 
8352   auto willWiden = [&](ElementCount VF) -> bool {
8353     if (VF.isScalar())
      return false;
8355     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8356     // The following case may be scalarized depending on the VF.
    // The flag shows whether we use an intrinsic or a usual call for the
    // vectorized version of the instruction.
    // Is it beneficial to perform an intrinsic call compared to a lib call?
8360     bool NeedToScalarize = false;
8361     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8362     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8363     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8364     return UseVectorIntrinsic || !NeedToScalarize;
8365   };
8366 
8367   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8368     return nullptr;
8369 
8370   ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
8371   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8372 }
8373 
8374 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8375   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8376          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8377   // Instruction should be widened, unless it is scalar after vectorization,
8378   // scalarization is profitable or it is predicated.
8379   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8380     return CM.isScalarAfterVectorization(I, VF) ||
8381            CM.isProfitableToScalarize(I, VF) ||
8382            CM.isScalarWithPredication(I, VF);
8383   };
8384   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8385                                                              Range);
8386 }
8387 
8388 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8389                                            ArrayRef<VPValue *> Operands) const {
8390   auto IsVectorizableOpcode = [](unsigned Opcode) {
8391     switch (Opcode) {
8392     case Instruction::Add:
8393     case Instruction::And:
8394     case Instruction::AShr:
8395     case Instruction::BitCast:
8396     case Instruction::FAdd:
8397     case Instruction::FCmp:
8398     case Instruction::FDiv:
8399     case Instruction::FMul:
8400     case Instruction::FNeg:
8401     case Instruction::FPExt:
8402     case Instruction::FPToSI:
8403     case Instruction::FPToUI:
8404     case Instruction::FPTrunc:
8405     case Instruction::FRem:
8406     case Instruction::FSub:
8407     case Instruction::ICmp:
8408     case Instruction::IntToPtr:
8409     case Instruction::LShr:
8410     case Instruction::Mul:
8411     case Instruction::Or:
8412     case Instruction::PtrToInt:
8413     case Instruction::SDiv:
8414     case Instruction::Select:
8415     case Instruction::SExt:
8416     case Instruction::Shl:
8417     case Instruction::SIToFP:
8418     case Instruction::SRem:
8419     case Instruction::Sub:
8420     case Instruction::Trunc:
8421     case Instruction::UDiv:
8422     case Instruction::UIToFP:
8423     case Instruction::URem:
8424     case Instruction::Xor:
8425     case Instruction::ZExt:
8426       return true;
8427     }
8428     return false;
8429   };
8430 
8431   if (!IsVectorizableOpcode(I->getOpcode()))
8432     return nullptr;
8433 
8434   // Success: widen this instruction.
8435   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8436 }
8437 
8438 void VPRecipeBuilder::fixHeaderPhis() {
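  // Header phi recipes were created with only their start value; now that all
  // recipes exist, add each phi's incoming value from the loop backedge as a
  // second operand.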
8439   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8440   for (VPHeaderPHIRecipe *R : PhisToFix) {
8441     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8442     VPRecipeBase *IncR =
8443         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8444     R->addOperand(IncR->getVPSingleValue());
8445   }
8446 }
8447 
8448 VPBasicBlock *VPRecipeBuilder::handleReplication(
8449     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8450     VPlanPtr &Plan) {
8451   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8452       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8453       Range);
8454 
8455   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8456       [&](ElementCount VF) { return CM.isPredicatedInst(I, VF, IsUniform); },
8457       Range);
8458 
8459   // Even if the instruction is not marked as uniform, there are certain
8460   // intrinsic calls that can be effectively treated as such, so we check for
8461   // them here. Conservatively, we only do this for scalable vectors, since
8462   // for fixed-width VFs we can always fall back on full scalarization.
8463   if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8464     switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
8465     case Intrinsic::assume:
8466     case Intrinsic::lifetime_start:
8467     case Intrinsic::lifetime_end:
8468       // For scalable vectors if one of the operands is variant then we still
8469       // want to mark as uniform, which will generate one instruction for just
8470       // the first lane of the vector. We can't scalarize the call in the same
8471       // way as for fixed-width vectors because we don't know how many lanes
8472       // there are.
8473       //
8474       // The reasons for doing it this way for scalable vectors are:
8475       //   1. For the assume intrinsic generating the instruction for the first
8476       //      lane is still better than not generating any at all. For
8477       //      example, the input may be a splat across all lanes.
8478       //   2. For the lifetime start/end intrinsics the pointer operand only
8479       //      does anything useful when the input comes from a stack object,
8480       //      which suggests it should always be uniform. For non-stack objects
8481       //      the effect is to poison the object, which still allows us to
8482       //      remove the call.
8483       IsUniform = true;
8484       break;
8485     default:
8486       break;
8487     }
8488   }
8489 
8490   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8491                                        IsUniform, IsPredicated);
8492   setRecipe(I, Recipe);
8493   Plan->addVPValue(I, Recipe);
8494 
8495   // Find if I uses a predicated instruction. If so, it will use its scalar
8496   // value. Avoid hoisting the insert-element which packs the scalar value into
8497   // a vector value, as that happens iff all users use the vector value.
8498   for (VPValue *Op : Recipe->operands()) {
8499     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8500     if (!PredR)
8501       continue;
8502     auto *RepR =
8503         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8504     assert(RepR->isPredicated() &&
8505            "expected Replicate recipe to be predicated");
8506     RepR->setAlsoPack(false);
8507   }
8508 
8509   // Finalize the recipe for Instr, first if it is not predicated.
8510   if (!IsPredicated) {
8511     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8512     VPBB->appendRecipe(Recipe);
8513     return VPBB;
8514   }
8515   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8516 
8517   VPBlockBase *SingleSucc = VPBB->getSingleSuccessor();
8518   assert(SingleSucc && "VPBB must have a single successor when handling "
8519                        "predicated replication.");
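  // Splice a replicate region into the CFG: the direct VPBB -> SingleSucc edge
  // is replaced by VPBB -> (pred.* region) -> RegSucc -> SingleSucc, and
  // RegSucc becomes the block into which subsequent recipes are appended.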
8520   VPBlockUtils::disconnectBlocks(VPBB, SingleSucc);
8521   // Record predicated instructions for above packing optimizations.
8522   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8523   VPBlockUtils::insertBlockAfter(Region, VPBB);
8524   auto *RegSucc = new VPBasicBlock();
8525   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8526   VPBlockUtils::connectBlocks(RegSucc, SingleSucc);
8527   return RegSucc;
8528 }
8529 
8530 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8531                                                       VPRecipeBase *PredRecipe,
8532                                                       VPlanPtr &Plan) {
8533   // Instructions marked for predication are replicated and placed under an
8534   // if-then construct to prevent side-effects.
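  // Schematically, the resulting single-entry single-exit replicate region is:
  //
  //   pred.<opcode>.entry:
  //     BRANCH-ON-MASK <block-in mask>
  //   pred.<opcode>.if:           // executed only for active lanes
  //     <replicated recipe>
  //   pred.<opcode>.continue:
  //     PHI-PREDICATED-INSTRUCTION // merges the result, if any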
8535 
8536   // Generate recipes to compute the block mask for this region.
8537   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8538 
8539   // Build the triangular if-then region.
8540   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8541   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8542   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8543   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8544   auto *PHIRecipe = Instr->getType()->isVoidTy()
8545                         ? nullptr
8546                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8547   if (PHIRecipe) {
8548     Plan->removeVPValueFor(Instr);
8549     Plan->addVPValue(Instr, PHIRecipe);
8550   }
8551   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8552   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8553   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8554 
8555   // Note: first set Entry as region entry and then connect successors starting
8556   // from it in order, to propagate the "parent" of each VPBasicBlock.
8557   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8558   VPBlockUtils::connectBlocks(Pred, Exit);
8559 
8560   return Region;
8561 }
8562 
8563 VPRecipeOrVPValueTy
8564 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8565                                         ArrayRef<VPValue *> Operands,
8566                                         VFRange &Range, VPlanPtr &Plan) {
8567   // First, check for specific widening recipes that deal with calls, memory
8568   // operations, inductions and Phi nodes.
8569   if (auto *CI = dyn_cast<CallInst>(Instr))
8570     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8571 
8572   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8573     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8574 
8575   VPRecipeBase *Recipe;
8576   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8577     if (Phi->getParent() != OrigLoop->getHeader())
8578       return tryToBlend(Phi, Operands, Plan);
8579     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, *Plan, Range)))
8580       return toVPRecipeResult(Recipe);
8581 
8582     VPHeaderPHIRecipe *PhiRecipe = nullptr;
8583     assert((Legal->isReductionVariable(Phi) ||
8584             Legal->isFirstOrderRecurrence(Phi)) &&
8585            "can only widen reductions and first-order recurrences here");
8586     VPValue *StartV = Operands[0];
8587     if (Legal->isReductionVariable(Phi)) {
8588       const RecurrenceDescriptor &RdxDesc =
8589           Legal->getReductionVars().find(Phi)->second;
8590       assert(RdxDesc.getRecurrenceStartValue() ==
8591              Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8592       PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
8593                                            CM.isInLoopReduction(Phi),
8594                                            CM.useOrderedReductions(RdxDesc));
8595     } else {
8596       PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
8597     }
8598 
8599     // Record the incoming value from the backedge, so we can add the incoming
8600     // value from the backedge after all recipes have been created.
8601     recordRecipeOf(cast<Instruction>(
8602         Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
8603     PhisToFix.push_back(PhiRecipe);
8604     return toVPRecipeResult(PhiRecipe);
8605   }
8606 
8607   if (isa<TruncInst>(Instr) &&
8608       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8609                                                Range, *Plan)))
8610     return toVPRecipeResult(Recipe);
8611 
8612   if (!shouldWiden(Instr, Range))
8613     return nullptr;
8614 
8615   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8616     return toVPRecipeResult(new VPWidenGEPRecipe(
8617         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8618 
8619   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8620     bool InvariantCond =
8621         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8622     return toVPRecipeResult(new VPWidenSelectRecipe(
8623         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8624   }
8625 
8626   return toVPRecipeResult(tryToWiden(Instr, Operands));
8627 }
8628 
8629 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8630                                                         ElementCount MaxVF) {
8631   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8632 
8633   // Collect instructions from the original loop that will become trivially dead
8634   // in the vectorized loop. We don't need to vectorize these instructions. For
8635   // example, original induction update instructions can become dead because we
8636   // separately emit induction "steps" when generating code for the new loop.
8637   // Similarly, we create a new latch condition when setting up the structure
8638   // of the new loop, so the old one can become dead.
8639   SmallPtrSet<Instruction *, 4> DeadInstructions;
8640   collectTriviallyDeadInstructions(DeadInstructions);
8641 
8642   // Add assume instructions we need to drop to DeadInstructions, to prevent
8643   // them from being added to the VPlan.
8644   // TODO: We only need to drop assumes in blocks that get flattened. If the
8645   // control flow is preserved, we should keep them.
8646   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8647   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8648 
8649   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8650   // Dead instructions do not need sinking. Remove them from SinkAfter.
8651   for (Instruction *I : DeadInstructions)
8652     SinkAfter.erase(I);
8653 
8654   // Cannot sink instructions after dead instructions (there won't be any
8655   // recipes for them). Instead, find the first non-dead previous instruction.
8656   for (auto &P : Legal->getSinkAfter()) {
8657     Instruction *SinkTarget = P.second;
8658     Instruction *FirstInst = &*SinkTarget->getParent()->begin();
8659     (void)FirstInst;
8660     while (DeadInstructions.contains(SinkTarget)) {
8661       assert(
8662           SinkTarget != FirstInst &&
8663           "Must find a live instruction (at least the one feeding the "
8664           "first-order recurrence PHI) before reaching beginning of the block");
8665       SinkTarget = SinkTarget->getPrevNode();
8666       assert(SinkTarget != P.first &&
8667              "sink source equals target, no sinking required");
8668     }
8669     P.second = SinkTarget;
8670   }
8671 
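  // Build one VPlan per contiguous sub-range of VFs. Each call may clamp
  // SubRange.End based on per-VF decisions, so e.g. VFs {1} and {2,4,8} can
  // end up in separate plans.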
8672   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8673   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8674     VFRange SubRange = {VF, MaxVFPlusOne};
8675     VPlans.push_back(
8676         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8677     VF = SubRange.End;
8678   }
8679 }
8680 
8681 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header, a
8682 // CanonicalIVIncrement{NUW} VPInstruction to increment it by VF * UF, and a
8683 // BranchOnCount VPInstruction to the latch.
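// Roughly, for a plan that keeps the increment and branch in the latch:
//
//   vector.body:
//     EMIT vp<%iv> = CANONICAL-INDUCTION
//     ...
//   vector.latch:
//     EMIT vp<%iv.next> = VF * UF +(nuw) vp<%iv>
//     EMIT branch-on-count vp<%iv.next> <vector-trip-count>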
8684 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL,
8685                                   bool HasNUW, bool IsVPlanNative) {
8686   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8687   auto *StartV = Plan.getOrAddVPValue(StartIdx);
8688 
8689   auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
8690   VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
8691   VPBasicBlock *Header = TopRegion->getEntryBasicBlock();
8692   Header->insert(CanonicalIVPHI, Header->begin());
8693 
8694   auto *CanonicalIVIncrement =
8695       new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW
8696                                : VPInstruction::CanonicalIVIncrement,
8697                         {CanonicalIVPHI}, DL);
8698   CanonicalIVPHI->addOperand(CanonicalIVIncrement);
8699 
8700   VPBasicBlock *EB = TopRegion->getExitBasicBlock();
8701   if (IsVPlanNative)
8702     EB->setCondBit(nullptr);
8703   EB->appendRecipe(CanonicalIVIncrement);
8704 
8705   auto *BranchOnCount =
8706       new VPInstruction(VPInstruction::BranchOnCount,
8707                         {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL);
8708   EB->appendRecipe(BranchOnCount);
8709 }
8710 
8711 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8712     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8713     const MapVector<Instruction *, Instruction *> &SinkAfter) {
8714 
8715   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8716 
8717   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8718 
8719   // ---------------------------------------------------------------------------
8720   // Pre-construction: record ingredients whose recipes we'll need to further
8721   // process after constructing the initial VPlan.
8722   // ---------------------------------------------------------------------------
8723 
8724   // Mark instructions we'll need to sink later and their targets as
8725   // ingredients whose recipe we'll need to record.
8726   for (auto &Entry : SinkAfter) {
8727     RecipeBuilder.recordRecipeOf(Entry.first);
8728     RecipeBuilder.recordRecipeOf(Entry.second);
8729   }
8730   for (auto &Reduction : CM.getInLoopReductionChains()) {
8731     PHINode *Phi = Reduction.first;
8732     RecurKind Kind =
8733         Legal->getReductionVars().find(Phi)->second.getRecurrenceKind();
8734     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8735 
8736     RecipeBuilder.recordRecipeOf(Phi);
8737     for (auto &R : ReductionOperations) {
8738       RecipeBuilder.recordRecipeOf(R);
8739       // For min/max reductions, where we have a pair of icmp/select, we also
8740       // need to record the ICmp recipe, so it can be removed later.
8741       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
8742              "Only min/max recurrences allowed for inloop reductions");
8743       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8744         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8745     }
8746   }
8747 
8748   // For each interleave group which is relevant for this (possibly trimmed)
8749   // Range, add it to the set of groups to be later applied to the VPlan and add
8750   // placeholders for its members' Recipes which we'll be replacing with a
8751   // single VPInterleaveRecipe.
8752   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8753     auto applyIG = [IG, this](ElementCount VF) -> bool {
8754       return (VF.isVector() && // Query is illegal for VF == 1
8755               CM.getWideningDecision(IG->getInsertPos(), VF) ==
8756                   LoopVectorizationCostModel::CM_Interleave);
8757     };
8758     if (!getDecisionAndClampRange(applyIG, Range))
8759       continue;
8760     InterleaveGroups.insert(IG);
8761     for (unsigned i = 0; i < IG->getFactor(); i++)
8762       if (Instruction *Member = IG->getMember(i))
8763         RecipeBuilder.recordRecipeOf(Member);
8764   }
8765 
8766   // ---------------------------------------------------------------------------
8767   // Build initial VPlan: Scan the body of the loop in a topological order to
8768   // visit each basic block after having visited its predecessor basic blocks.
8769   // ---------------------------------------------------------------------------
8770 
8771   // Create initial VPlan skeleton, starting with a block for the pre-header,
8772   // followed by a region for the vector loop, followed by the middle block. The
8773   // skeleton vector loop region contains a header and latch block.
8774   VPBasicBlock *Preheader = new VPBasicBlock("vector.ph");
8775   auto Plan = std::make_unique<VPlan>(Preheader);
8776 
8777   VPBasicBlock *HeaderVPBB = new VPBasicBlock("vector.body");
8778   VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
8779   VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
8780   auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop");
8781   VPBlockUtils::insertBlockAfter(TopRegion, Preheader);
8782   VPBasicBlock *MiddleVPBB = new VPBasicBlock("middle.block");
8783   VPBlockUtils::insertBlockAfter(MiddleVPBB, TopRegion);
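  // At this point the plan has the shape:
  //   vector.ph -> (vector loop: vector.body -> vector.latch) -> middle.block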
8784 
8785   Instruction *DLInst =
8786       getDebugLocFromInstOrOperands(Legal->getPrimaryInduction());
8787   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(),
8788                         DLInst ? DLInst->getDebugLoc() : DebugLoc(),
8789                         !CM.foldTailByMasking(), false);
8790 
8791   // Scan the body of the loop in a topological order to visit each basic block
8792   // after having visited its predecessor basic blocks.
8793   LoopBlocksDFS DFS(OrigLoop);
8794   DFS.perform(LI);
8795 
8796   VPBasicBlock *VPBB = HeaderVPBB;
8797   SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
8798   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
8799     // Relevant instructions from basic block BB will be grouped into VPRecipe
8800     // ingredients and will fill a new VPBasicBlock.
8801     unsigned VPBBsForBB = 0;
8802     if (VPBB != HeaderVPBB)
8803       VPBB->setName(BB->getName());
8804     Builder.setInsertPoint(VPBB);
8805 
8806     // Introduce each ingredient into VPlan.
8807     // TODO: Model and preserve debug intrinsics in VPlan.
8808     for (Instruction &I : BB->instructionsWithoutDebug()) {
8809       Instruction *Instr = &I;
8810 
8811       // First filter out irrelevant instructions, to ensure no recipes are
8812       // built for them.
8813       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
8814         continue;
8815 
8816       SmallVector<VPValue *, 4> Operands;
8817       auto *Phi = dyn_cast<PHINode>(Instr);
8818       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
8819         Operands.push_back(Plan->getOrAddVPValue(
8820             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
8821       } else {
8822         auto OpRange = Plan->mapToVPValues(Instr->operands());
8823         Operands = {OpRange.begin(), OpRange.end()};
8824       }
8825       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
8826               Instr, Operands, Range, Plan)) {
8827         // If Instr can be simplified to an existing VPValue, use it.
8828         if (RecipeOrValue.is<VPValue *>()) {
8829           auto *VPV = RecipeOrValue.get<VPValue *>();
8830           Plan->addVPValue(Instr, VPV);
8831           // If the re-used value is a recipe, register the recipe for the
8832           // instruction, in case the recipe for Instr needs to be recorded.
8833           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
8834             RecipeBuilder.setRecipe(Instr, R);
8835           continue;
8836         }
8837         // Otherwise, add the new recipe.
8838         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
8839         for (auto *Def : Recipe->definedValues()) {
8840           auto *UV = Def->getUnderlyingValue();
8841           Plan->addVPValue(UV, Def);
8842         }
8843 
8844         if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) &&
8845             HeaderVPBB->getFirstNonPhi() != VPBB->end()) {
8846           // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section
8847           // of the header block. That can happen for truncates of induction
8848           // variables. Those recipes are moved to the phi section of the header
8849           // block after applying SinkAfter, which relies on the original
8850           // position of the trunc.
8851           assert(isa<TruncInst>(Instr));
8852           InductionsToMove.push_back(
8853               cast<VPWidenIntOrFpInductionRecipe>(Recipe));
8854         }
8855         RecipeBuilder.setRecipe(Instr, Recipe);
8856         VPBB->appendRecipe(Recipe);
8857         continue;
8858       }
8859 
8860       // Invariant stores inside the loop will be deleted, and a single store
8861       // with the final reduction value will be added to the exit block.
8862       StoreInst *SI;
8863       if ((SI = dyn_cast<StoreInst>(&I)) &&
8864           Legal->isInvariantAddressOfReduction(SI->getPointerOperand()))
8865         continue;
8866 
8867       // Otherwise, if all widening options failed, the instruction is to be
8868       // replicated. This may create a successor for VPBB.
8869       VPBasicBlock *NextVPBB =
8870           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
8871       if (NextVPBB != VPBB) {
8872         VPBB = NextVPBB;
8873         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8874                                     : "");
8875       }
8876     }
8877 
8878     VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB);
8879     VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor());
8880   }
8881 
8882   HeaderVPBB->setName("vector.body");
8883 
8884   // Fold the last, empty block into its predecessor.
8885   VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB);
8886   assert(VPBB && "expected to fold last (empty) block");
8887   // After here, VPBB should not be used.
8888   VPBB = nullptr;
8889 
8890   assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) &&
8891          !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() &&
8892          "entry block must be set to a VPRegionBlock having a non-empty entry "
8893          "VPBasicBlock");
8894   RecipeBuilder.fixHeaderPhis();
8895 
8896   // ---------------------------------------------------------------------------
8897   // Transform initial VPlan: Apply previously taken decisions, in order, to
8898   // bring the VPlan to its final state.
8899   // ---------------------------------------------------------------------------
8900 
8901   // Apply Sink-After legal constraints.
8902   auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
8903     auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
8904     if (Region && Region->isReplicator()) {
8905       assert(Region->getNumSuccessors() == 1 &&
8906              Region->getNumPredecessors() == 1 && "Expected SESE region!");
8907       assert(R->getParent()->size() == 1 &&
8908              "A recipe in an original replicator region must be the only "
8909              "recipe in its block");
8910       return Region;
8911     }
8912     return nullptr;
8913   };
8914   for (auto &Entry : SinkAfter) {
8915     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
8916     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
8917 
8918     auto *TargetRegion = GetReplicateRegion(Target);
8919     auto *SinkRegion = GetReplicateRegion(Sink);
8920     if (!SinkRegion) {
8921       // If the sink source is not a replicate region, sink the recipe directly.
8922       if (TargetRegion) {
8923         // The target is in a replication region, make sure to move Sink to
8924         // the block after it, not into the replication region itself.
8925         VPBasicBlock *NextBlock =
8926             cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
8927         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
8928       } else
8929         Sink->moveAfter(Target);
8930       continue;
8931     }
8932 
8933     // The sink source is in a replicate region. Unhook the region from the CFG.
8934     auto *SinkPred = SinkRegion->getSinglePredecessor();
8935     auto *SinkSucc = SinkRegion->getSingleSuccessor();
8936     VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
8937     VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
8938     VPBlockUtils::connectBlocks(SinkPred, SinkSucc);
8939 
8940     if (TargetRegion) {
8941       // The target recipe is also in a replicate region, move the sink region
8942       // after the target region.
8943       auto *TargetSucc = TargetRegion->getSingleSuccessor();
8944       VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
8945       VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
8946       VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
8947     } else {
8948       // The sink source is in a replicate region; we need to move the whole
8949       // replicate region, which should only contain a single recipe in the
8950       // main block.
8951       auto *SplitBlock =
8952           Target->getParent()->splitAt(std::next(Target->getIterator()));
8953 
8954       auto *SplitPred = SplitBlock->getSinglePredecessor();
8955 
8956       VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
8957       VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
8958       VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
8959     }
8960   }
8961 
8962   VPlanTransforms::removeRedundantCanonicalIVs(*Plan);
8963   VPlanTransforms::removeRedundantInductionCasts(*Plan);
8964 
8965   // Now that sink-after is done, move induction recipes for optimized truncates
8966   // to the phi section of the header block.
8967   for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove)
8968     Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
8969 
8970   // Adjust the recipes for any inloop reductions.
8971   adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExit()), Plan,
8972                              RecipeBuilder, Range.Start);
8973 
8974   // Introduce a recipe to combine the incoming and previous values of a
8975   // first-order recurrence.
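  // E.g., a recurrence phi vp<%r> with backedge value vp<%p> gets a companion
  //   vp<%s> = first-order splice vp<%r>, vp<%p>
  // and all other users of vp<%r> are redirected to vp<%s>.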
8976   for (VPRecipeBase &R :
8977        Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
8978     auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R);
8979     if (!RecurPhi)
8980       continue;
8981 
8982     VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe();
8983     VPBasicBlock *InsertBlock = PrevRecipe->getParent();
8984     auto *Region = GetReplicateRegion(PrevRecipe);
8985     if (Region)
8986       InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor());
8987     if (Region || PrevRecipe->isPhi())
8988       Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
8989     else
8990       Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator()));
8991 
8992     auto *RecurSplice = cast<VPInstruction>(
8993         Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
8994                              {RecurPhi, RecurPhi->getBackedgeValue()}));
8995 
8996     RecurPhi->replaceAllUsesWith(RecurSplice);
8997     // Set the first operand of RecurSplice to RecurPhi again, after replacing
8998     // all users.
8999     RecurSplice->setOperand(0, RecurPhi);
9000   }
9001 
9002   // Interleave memory: for each Interleave Group we marked earlier as relevant
9003   // for this VPlan, replace the Recipes widening its memory instructions with a
9004   // single VPInterleaveRecipe at its insertion point.
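  // E.g., an interleave group of two loads becomes one VPInterleaveRecipe
  // defining two VPValues; users of the original widened loads are rewired to
  // those values and the member recipes are erased.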
9005   for (auto IG : InterleaveGroups) {
9006     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
9007         RecipeBuilder.getRecipe(IG->getInsertPos()));
9008     SmallVector<VPValue *, 4> StoredValues;
9009     for (unsigned i = 0; i < IG->getFactor(); ++i)
9010       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
9011         auto *StoreR =
9012             cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
9013         StoredValues.push_back(StoreR->getStoredValue());
9014       }
9015 
9016     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
9017                                         Recipe->getMask());
9018     VPIG->insertBefore(Recipe);
9019     unsigned J = 0;
9020     for (unsigned i = 0; i < IG->getFactor(); ++i)
9021       if (Instruction *Member = IG->getMember(i)) {
9022         if (!Member->getType()->isVoidTy()) {
9023           VPValue *OriginalV = Plan->getVPValue(Member);
9024           Plan->removeVPValueFor(Member);
9025           Plan->addVPValue(Member, VPIG->getVPValue(J));
9026           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
9027           J++;
9028         }
9029         RecipeBuilder.getRecipe(Member)->eraseFromParent();
9030       }
9031   }
9032 
9033   // From this point onwards, VPlan-to-VPlan transformations may change the plan
9034   // in ways that make accessing values using original IR values incorrect.
9035   Plan->disableValue2VPValue();
9036 
9037   VPlanTransforms::optimizeInductions(*Plan, *PSE.getSE());
9038   VPlanTransforms::sinkScalarOperands(*Plan);
9039   VPlanTransforms::mergeReplicateRegions(*Plan);
9040   VPlanTransforms::removeDeadRecipes(*Plan, *OrigLoop);
9041   VPlanTransforms::removeRedundantExpandSCEVRecipes(*Plan);
9042 
9043   std::string PlanName;
9044   raw_string_ostream RSO(PlanName);
9045   ElementCount VF = Range.Start;
9046   Plan->addVF(VF);
9047   RSO << "Initial VPlan for VF={" << VF;
9048   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9049     Plan->addVF(VF);
9050     RSO << "," << VF;
9051   }
9052   RSO << "},UF>=1";
9053   RSO.flush();
9054   Plan->setName(PlanName);
9055 
9056   // Fold Exit block into its predecessor if possible.
9057   // TODO: Fold block earlier once all VPlan transforms properly maintain a
9058   // VPBasicBlock as exit.
9059   VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExit());
9060 
9061   assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
9062   return Plan;
9063 }
9064 
9065 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
9066   // Outer loop handling: outer loops may require CFG and instruction level
9067   // transformations before even evaluating whether vectorization is profitable.
9068   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9069   // the vectorization pipeline.
9070   assert(!OrigLoop->isInnermost());
9071   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9072 
9073   // Create new empty VPlan
9074   auto Plan = std::make_unique<VPlan>();
9075 
9076   // Build hierarchical CFG
9077   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9078   HCFGBuilder.buildHierarchicalCFG();
9079 
9080   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9081        VF *= 2)
9082     Plan->addVF(VF);
9083 
9084   if (EnableVPlanPredication) {
9085     VPlanPredicator VPP(*Plan);
9086     VPP.predicate();
9087 
9088     // Avoid running transformation to recipes until masked code generation in
9089     // VPlan-native path is in place.
9090     return Plan;
9091   }
9092 
9093   SmallPtrSet<Instruction *, 1> DeadInstructions;
9094   VPlanTransforms::VPInstructionsToVPRecipes(
9095       OrigLoop, Plan,
9096       [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); },
9097       DeadInstructions, *PSE.getSE());
9098 
9099   // Update plan to be compatible with the inner loop vectorizer for
9100   // code-generation.
9101   VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion();
9102   VPBasicBlock *Preheader = LoopRegion->getEntryBasicBlock();
9103   VPBasicBlock *Exit = LoopRegion->getExitBasicBlock();
9104   VPBlockBase *Latch = Exit->getSinglePredecessor();
9105   VPBlockBase *Header = Preheader->getSingleSuccessor();
9106 
9107   // 1. Move preheader block out of main vector loop.
9108   Preheader->setParent(LoopRegion->getParent());
9109   VPBlockUtils::disconnectBlocks(Preheader, Header);
9110   VPBlockUtils::connectBlocks(Preheader, LoopRegion);
9111   Plan->setEntry(Preheader);
9112 
9113   // 2. Disconnect backedge and exit block.
9114   VPBlockUtils::disconnectBlocks(Latch, Header);
9115   VPBlockUtils::disconnectBlocks(Latch, Exit);
9116 
9117   // 3. Update entry and exit of main vector loop region.
9118   LoopRegion->setEntry(Header);
9119   LoopRegion->setExit(Latch);
9120 
9121   // 4. Remove exit block.
9122   delete Exit;
9123 
9124   addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(),
9125                         true, true);
9126   return Plan;
9127 }
9128 
9129 // Adjust the recipes for reductions. For in-loop reductions the chain of
9130 // instructions leading from the loop exit instr to the phi needs to be converted
9131 // to reductions, with one operand being vector and the other being the scalar
9132 // reduction chain. For other reductions, a select is introduced between the phi
9133 // and live-out recipes when folding the tail.
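// E.g., for an in-loop chain "%phi = phi ...; %red = add %phi, %x", the
// widened add is replaced by a REDUCE recipe that reduces %x into the scalar
// chain, masked by the block-in mask when the block needs predication.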
9134 void LoopVectorizationPlanner::adjustRecipesForReductions(
9135     VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9136     ElementCount MinVF) {
9137   for (auto &Reduction : CM.getInLoopReductionChains()) {
9138     PHINode *Phi = Reduction.first;
9139     const RecurrenceDescriptor &RdxDesc =
9140         Legal->getReductionVars().find(Phi)->second;
9141     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9142 
9143     if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9144       continue;
9145 
9146     // ReductionOperations are ordered top-down from the phi's use to the
9147     // LoopExitValue. We keep track of the previous item (the Chain) to tell
9148     // which of the two operands will remain scalar and which will be reduced.
9149     // For minmax the chain will be the select instructions.
9150     Instruction *Chain = Phi;
9151     for (Instruction *R : ReductionOperations) {
9152       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9153       RecurKind Kind = RdxDesc.getRecurrenceKind();
9154 
9155       VPValue *ChainOp = Plan->getVPValue(Chain);
9156       unsigned FirstOpId;
9157       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9158              "Only min/max recurrences allowed for inloop reductions");
9159       // Recognize a call to the llvm.fmuladd intrinsic.
9160       bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
9161       assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
9162              "Expected instruction to be a call to the llvm.fmuladd intrinsic");
9163       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9164         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9165                "Expected to replace a VPWidenSelectSC");
9166         FirstOpId = 1;
9167       } else {
9168         assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
9169                 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
9170                "Expected to replace a VPWidenSC");
9171         FirstOpId = 0;
9172       }
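      // Of the operation's two candidate operands, the one equal to the
      // current Chain is the scalar reduction value; the other becomes the
      // vector operand.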
9173       unsigned VecOpId =
9174           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9175       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9176 
9177       auto *CondOp = CM.blockNeedsPredicationForAnyReason(R->getParent())
9178                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9179                          : nullptr;
9180 
9181       if (IsFMulAdd) {
9182         // If the instruction is a call to the llvm.fmuladd intrinsic then we
9183         // need to create an fmul recipe to use as the vector operand for the
9184         // fadd reduction.
9185         VPInstruction *FMulRecipe = new VPInstruction(
9186             Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))});
9187         FMulRecipe->setFastMathFlags(R->getFastMathFlags());
9188         WidenRecipe->getParent()->insert(FMulRecipe,
9189                                          WidenRecipe->getIterator());
9190         VecOp = FMulRecipe;
9191       }
9192       VPReductionRecipe *RedRecipe =
9193           new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9194       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9195       Plan->removeVPValueFor(R);
9196       Plan->addVPValue(R, RedRecipe);
9197       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9199       WidenRecipe->eraseFromParent();
9200 
9201       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9202         VPRecipeBase *CompareRecipe =
9203             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9204         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9205                "Expected to replace a VPWidenSC");
9206         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9207                "Expected no remaining users");
9208         CompareRecipe->eraseFromParent();
9209       }
9210       Chain = R;
9211     }
9212   }
9213 
9214   // If tail is folded by masking, introduce selects between the phi
9215   // and the live-out instruction of each reduction, at the beginning of the
9216   // dedicated latch block.
9217   if (CM.foldTailByMasking()) {
9218     Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin());
9219     for (VPRecipeBase &R :
9220          Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
9221       VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9222       if (!PhiR || PhiR->isInLoop())
9223         continue;
9224       VPValue *Cond =
9225           RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9226       VPValue *Red = PhiR->getBackedgeValue();
9227       assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB &&
9228              "reduction recipe must be defined before latch");
9229       Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
9230     }
9231   }
9232 }
9233 
9234 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9235 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9236                                VPSlotTracker &SlotTracker) const {
9237   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9238   IG->getInsertPos()->printAsOperand(O, false);
9239   O << ", ";
9240   getAddr()->printAsOperand(O, SlotTracker);
9241   VPValue *Mask = getMask();
9242   if (Mask) {
9243     O << ", ";
9244     Mask->printAsOperand(O, SlotTracker);
9245   }
9246 
9247   unsigned OpIdx = 0;
9248   for (unsigned i = 0; i < IG->getFactor(); ++i) {
9249     if (!IG->getMember(i))
9250       continue;
9251     if (getNumStoreOperands() > 0) {
9252       O << "\n" << Indent << "  store ";
9253       getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
9254       O << " to index " << i;
9255     } else {
9256       O << "\n" << Indent << "  ";
9257       getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
9258       O << " = load from index " << i;
9259     }
9260     ++OpIdx;
9261   }
9262 }
9263 #endif
9264 
9265 void VPWidenCallRecipe::execute(VPTransformState &State) {
9266   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9267                                   *this, State);
9268 }
9269 
9270 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9271   auto &I = *cast<SelectInst>(getUnderlyingInstr());
9272   State.ILV->setDebugLocFromInst(&I);
9273 
9274   // The condition can be loop invariant but still defined inside the
9275   // loop. This means that we can't just use the original 'cond' value.
9276   // We have to take the 'vectorized' value and pick the first lane.
9277   // Instcombine will make this a no-op.
9278   auto *InvarCond =
9279       InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr;
9280 
9281   for (unsigned Part = 0; Part < State.UF; ++Part) {
9282     Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part);
9283     Value *Op0 = State.get(getOperand(1), Part);
9284     Value *Op1 = State.get(getOperand(2), Part);
9285     Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
9286     State.set(this, Sel, Part);
9287     State.ILV->addMetadata(Sel, &I);
9288   }
9289 }
9290 
9291 void VPWidenRecipe::execute(VPTransformState &State) {
9292   auto &I = *cast<Instruction>(getUnderlyingValue());
9293   auto &Builder = State.Builder;
9294   switch (I.getOpcode()) {
9295   case Instruction::Call:
9296   case Instruction::Br:
9297   case Instruction::PHI:
9298   case Instruction::GetElementPtr:
9299   case Instruction::Select:
9300     llvm_unreachable("This instruction is handled by a different recipe.");
9301   case Instruction::UDiv:
9302   case Instruction::SDiv:
9303   case Instruction::SRem:
9304   case Instruction::URem:
9305   case Instruction::Add:
9306   case Instruction::FAdd:
9307   case Instruction::Sub:
9308   case Instruction::FSub:
9309   case Instruction::FNeg:
9310   case Instruction::Mul:
9311   case Instruction::FMul:
9312   case Instruction::FDiv:
9313   case Instruction::FRem:
9314   case Instruction::Shl:
9315   case Instruction::LShr:
9316   case Instruction::AShr:
9317   case Instruction::And:
9318   case Instruction::Or:
9319   case Instruction::Xor: {
9320     // Just widen unops and binops.
9321     State.ILV->setDebugLocFromInst(&I);
9322 
9323     for (unsigned Part = 0; Part < State.UF; ++Part) {
9324       SmallVector<Value *, 2> Ops;
9325       for (VPValue *VPOp : operands())
9326         Ops.push_back(State.get(VPOp, Part));
9327 
9328       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
9329 
9330       if (auto *VecOp = dyn_cast<Instruction>(V)) {
9331         VecOp->copyIRFlags(&I);
9332 
9333         // If the instruction is vectorized and was in a basic block that needed
9334         // predication, we can't propagate poison-generating flags (nuw/nsw,
9335         // exact, etc.). The control flow has been linearized and the
9336         // instruction is no longer guarded by the predicate, which could cause
9337         // the flag properties to no longer hold.
9338         if (State.MayGeneratePoisonRecipes.contains(this))
9339           VecOp->dropPoisonGeneratingFlags();
9340       }
9341 
9342       // Use this vector value for all users of the original instruction.
9343       State.set(this, V, Part);
9344       State.ILV->addMetadata(V, &I);
9345     }
9346 
9347     break;
9348   }
9349   case Instruction::ICmp:
9350   case Instruction::FCmp: {
9351     // Widen compares. Generate vector compares.
9352     bool FCmp = (I.getOpcode() == Instruction::FCmp);
9353     auto *Cmp = cast<CmpInst>(&I);
9354     State.ILV->setDebugLocFromInst(Cmp);
9355     for (unsigned Part = 0; Part < State.UF; ++Part) {
9356       Value *A = State.get(getOperand(0), Part);
9357       Value *B = State.get(getOperand(1), Part);
9358       Value *C = nullptr;
9359       if (FCmp) {
9360         // Propagate fast math flags.
9361         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9362         Builder.setFastMathFlags(Cmp->getFastMathFlags());
9363         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
9364       } else {
9365         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
9366       }
9367       State.set(this, C, Part);
9368       State.ILV->addMetadata(C, &I);
9369     }
9370 
9371     break;
9372   }
9373 
9374   case Instruction::ZExt:
9375   case Instruction::SExt:
9376   case Instruction::FPToUI:
9377   case Instruction::FPToSI:
9378   case Instruction::FPExt:
9379   case Instruction::PtrToInt:
9380   case Instruction::IntToPtr:
9381   case Instruction::SIToFP:
9382   case Instruction::UIToFP:
9383   case Instruction::Trunc:
9384   case Instruction::FPTrunc:
9385   case Instruction::BitCast: {
9386     auto *CI = cast<CastInst>(&I);
9387     State.ILV->setDebugLocFromInst(CI);
9388 
9389     // Vectorize casts.
9390     Type *DestTy = (State.VF.isScalar())
9391                        ? CI->getType()
9392                        : VectorType::get(CI->getType(), State.VF);
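    // E.g., an i32 result at VF = 4 is widened to <4 x i32>.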
9393 
9394     for (unsigned Part = 0; Part < State.UF; ++Part) {
9395       Value *A = State.get(getOperand(0), Part);
9396       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
9397       State.set(this, Cast, Part);
9398       State.ILV->addMetadata(Cast, &I);
9399     }
9400     break;
9401   }
9402   default:
9403     // This instruction is not vectorized by simple widening.
9404     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
9405     llvm_unreachable("Unhandled instruction!");
9406   } // end of switch.
9407 }
9408 
9409 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9410   auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
9411   // Construct a vector GEP by widening the operands of the scalar GEP as
9412   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
9413   // results in a vector of pointers when at least one operand of the GEP
9414   // is vector-typed. Thus, to keep the representation compact, we only use
9415   // vector-typed operands for loop-varying values.
9416 
9417   if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
9418     // If we are vectorizing, but the GEP has only loop-invariant operands,
9419     // the GEP we build (by only using vector-typed operands for
9420     // loop-varying values) would be a scalar pointer. Thus, to ensure we
9421     // produce a vector of pointers, we need to either arbitrarily pick an
9422     // operand to broadcast, or broadcast a clone of the original GEP.
9423     // Here, we broadcast a clone of the original.
9424     //
9425     // TODO: If at some point we decide to scalarize instructions having
9426     //       loop-invariant operands, this special case will no longer be
9427     //       required. We would add the scalarization decision to
9428     //       collectLoopScalars() and teach getVectorValue() to broadcast
9429     //       the lane-zero scalar value.
9430     auto *Clone = State.Builder.Insert(GEP->clone());
9431     for (unsigned Part = 0; Part < State.UF; ++Part) {
9432       Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone);
9433       State.set(this, EntryPart, Part);
9434       State.ILV->addMetadata(EntryPart, GEP);
9435     }
9436   } else {
9437     // If the GEP has at least one loop-varying operand, we are sure to
9438     // produce a vector of pointers. But if we are only unrolling, we want
9439     // to produce a scalar GEP for each unroll part. Thus, the GEP we
9440     // produce with the code below will be scalar (if VF == 1) or vector
9441     // (otherwise). Note that for the unroll-only case, we still maintain
9442     // values in the vector mapping with initVector, as we do for other
9443     // instructions.
9444     for (unsigned Part = 0; Part < State.UF; ++Part) {
9445       // The pointer operand of the new GEP. If it's loop-invariant, we
9446       // won't broadcast it.
9447       auto *Ptr = IsPtrLoopInvariant
9448                       ? State.get(getOperand(0), VPIteration(0, 0))
9449                       : State.get(getOperand(0), Part);
9450 
9451       // Collect all the indices for the new GEP. If any index is
9452       // loop-invariant, we won't broadcast it.
9453       SmallVector<Value *, 4> Indices;
9454       for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
9455         VPValue *Operand = getOperand(I);
9456         if (IsIndexLoopInvariant[I - 1])
9457           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
9458         else
9459           Indices.push_back(State.get(Operand, Part));
9460       }
9461 
9462       // If the GEP instruction is vectorized and was in a basic block that
9463       // needed predication, we can't propagate the poison-generating 'inbounds'
9464       // flag. The control flow has been linearized and the GEP is no longer
9465       // guarded by the predicate, which could cause the 'inbounds' property
9466       // to no longer hold.
9467       bool IsInBounds =
9468           GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0;
9469 
9470       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
9471       // but it should be a vector, otherwise.
9472       auto *NewGEP = IsInBounds
9473                          ? State.Builder.CreateInBoundsGEP(
9474                                GEP->getSourceElementType(), Ptr, Indices)
9475                          : State.Builder.CreateGEP(GEP->getSourceElementType(),
9476                                                    Ptr, Indices);
9477       assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
9478              "NewGEP is not a pointer vector");
9479       State.set(this, NewGEP, Part);
9480       State.ILV->addMetadata(NewGEP, GEP);
9481     }
9482   }
9483 }
9484 
9485 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9486   assert(!State.Instance && "Int or FP induction being replicated.");
9487 
9488   Value *Start = getStartValue()->getLiveInIRValue();
9489   const InductionDescriptor &ID = getInductionDescriptor();
9490   TruncInst *Trunc = getTruncInst();
9491   IRBuilderBase &Builder = State.Builder;
9492   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
9493   assert(State.VF.isVector() && "must have vector VF");
9494 
9495   // The value from the original loop to which we are mapping the new induction
9496   // variable.
9497   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
9498 
9499   // Fast-math-flags propagate from the original induction instruction.
9500   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9501   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
9502     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
9503 
9504   // Now do the actual transformations, and start with fetching the step value.
9505   Value *Step = State.get(getStepValue(), VPIteration(0, 0));
9506 
9507   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
9508          "Expected either an induction phi-node or a truncate of it!");
9509 
9510   // Construct the initial value of the vector IV in the vector loop preheader
9511   auto CurrIP = Builder.saveIP();
9512   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
9513   Builder.SetInsertPoint(VectorPH->getTerminator());
9514   if (isa<TruncInst>(EntryVal)) {
9515     assert(Start->getType()->isIntegerTy() &&
9516            "Truncation requires an integer type");
9517     auto *TruncType = cast<IntegerType>(EntryVal->getType());
9518     Step = Builder.CreateTrunc(Step, TruncType);
9519     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
9520   }
9521 
9522   Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
9523   Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
9524   Value *SteppedStart = getStepVector(
9525       SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder);
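  // E.g., for Start = S, Step = T and VF = 4, SteppedStart is
  // <S, S + T, S + 2*T, S + 3*T> (using the induction's add/fadd opcode).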
9526 
9527   // We create vector phi nodes for both integer and floating-point induction
9528   // variables. Here, we determine the kind of arithmetic we will perform.
9529   Instruction::BinaryOps AddOp;
9530   Instruction::BinaryOps MulOp;
9531   if (Step->getType()->isIntegerTy()) {
9532     AddOp = Instruction::Add;
9533     MulOp = Instruction::Mul;
9534   } else {
9535     AddOp = ID.getInductionOpcode();
9536     MulOp = Instruction::FMul;
9537   }
9538 
9539   // Multiply the vectorization factor by the step using integer or
9540   // floating-point arithmetic as appropriate.
9541   Type *StepType = Step->getType();
9542   Value *RuntimeVF;
9543   if (Step->getType()->isFloatingPointTy())
9544     RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF);
9545   else
9546     RuntimeVF = getRuntimeVF(Builder, StepType, State.VF);
9547   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
9548 
9549   // Create a vector splat to use in the induction update.
9550   //
9551   // FIXME: If the step is non-constant, we create the vector splat with
9552   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
9553   //        handle a constant vector splat.
9554   Value *SplatVF = isa<Constant>(Mul)
9555                        ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
9556                        : Builder.CreateVectorSplat(State.VF, Mul);
9557   Builder.restoreIP(CurrIP);
9558 
9559   // We may need to add the step a number of times, depending on the unroll
9560   // factor. The last of those goes into the PHI.
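  // E.g., with UF = 2 and VF = 4: part 0 is SteppedStart, part 1 is
  // "step.add" = part 0 + 4 * Step, and the final "vec.ind.next" feeding the
  // phi's backedge is part 1 + 4 * Step.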
9561   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
9562                                     &*State.CFG.PrevBB->getFirstInsertionPt());
9563   VecInd->setDebugLoc(EntryVal->getDebugLoc());
9564   Instruction *LastInduction = VecInd;
9565   for (unsigned Part = 0; Part < State.UF; ++Part) {
9566     State.set(this, LastInduction, Part);
9567 
9568     if (isa<TruncInst>(EntryVal))
9569       State.ILV->addMetadata(LastInduction, EntryVal);
9570 
9571     LastInduction = cast<Instruction>(
9572         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
9573     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
9574   }
9575 
9576   LastInduction->setName("vec.ind.next");
9577   VecInd->addIncoming(SteppedStart, VectorPH);
9578   // Add induction update using an incorrect block temporarily. The phi node
9579   // will be fixed after VPlan execution. Note that at this point the latch
9580   // block cannot be used, as it does not exist yet.
9581   // TODO: Model increment value in VPlan, by turning the recipe into a
9582   // multi-def and a subclass of VPHeaderPHIRecipe.
9583   VecInd->addIncoming(LastInduction, VectorPH);
9584 }
9585 
9586 void VPWidenPointerInductionRecipe::execute(VPTransformState &State) {
9587   assert(IndDesc.getKind() == InductionDescriptor::IK_PtrInduction &&
9588          "Not a pointer induction according to InductionDescriptor!");
9589   assert(cast<PHINode>(getUnderlyingInstr())->getType()->isPointerTy() &&
9590          "Unexpected type.");
9591 
9592   auto *IVR = getParent()->getPlan()->getCanonicalIV();
9593   PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0));
9594 
9595   if (all_of(users(), [this](const VPUser *U) {
9596         return cast<VPRecipeBase>(U)->usesScalars(this);
9597       })) {
9598     // The normalized scalar index that starts counting at zero.
9599     Value *PtrInd = State.Builder.CreateSExtOrTrunc(
9600         CanonicalIV, IndDesc.getStep()->getType());
9601     // Determine the number of scalars we need to generate for each unroll
9602     // iteration. If the instruction is uniform, we only need to generate the
9603     // first lane. Otherwise, we generate all VF values.
9604     bool IsUniform = vputils::onlyFirstLaneUsed(this);
9605     assert((IsUniform || !State.VF.isScalable()) &&
9606            "Cannot scalarize a scalable VF");
9607     unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();
9608 
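    // E.g., with a fixed VF = 4 and UF = 2, lane L of part P is assigned
    // the address Start + (CanonicalIV + P * 4 + L) * Step.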
9609     for (unsigned Part = 0; Part < State.UF; ++Part) {
9610       Value *PartStart =
9611           createStepForVF(State.Builder, PtrInd->getType(), State.VF, Part);
9612 
9613       for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
9614         Value *Idx = State.Builder.CreateAdd(
9615             PartStart, ConstantInt::get(PtrInd->getType(), Lane));
9616         Value *GlobalIdx = State.Builder.CreateAdd(PtrInd, Idx);
9617 
9618         Value *Step = CreateStepValue(IndDesc.getStep(), SE,
9619                                       State.CFG.PrevBB->getTerminator());
9620         Value *SclrGep = emitTransformedIndex(
9621             State.Builder, GlobalIdx, IndDesc.getStartValue(), Step, IndDesc);
9622         SclrGep->setName("next.gep");
9623         State.set(this, SclrGep, VPIteration(Part, Lane));
9624       }
9625     }
9626     return;
9627   }
9628 
9629   assert(isa<SCEVConstant>(IndDesc.getStep()) &&
9630          "Induction step not a SCEV constant!");
9631   Type *PhiType = IndDesc.getStep()->getType();
9632 
  // Build a pointer phi.
9634   Value *ScalarStartValue = getStartValue()->getLiveInIRValue();
9635   Type *ScStValueType = ScalarStartValue->getType();
9636   PHINode *NewPointerPhi =
9637       PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV);
9638 
9639   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
9640   NewPointerPhi->addIncoming(ScalarStartValue, VectorPH);
9641 
  // A pointer induction, stepped by a GEP.
9643   const DataLayout &DL = NewPointerPhi->getModule()->getDataLayout();
9644   Instruction *InductionLoc = &*State.Builder.GetInsertPoint();
9645 
9646   const SCEV *ScalarStep = IndDesc.getStep();
9647   SCEVExpander Exp(SE, DL, "induction");
9648   Value *ScalarStepValue = Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
9649   Value *RuntimeVF = getRuntimeVF(State.Builder, PhiType, State.VF);
9650   Value *NumUnrolledElems =
9651       State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
9652   Value *InductionGEP = GetElementPtrInst::Create(
9653       IndDesc.getElementType(), NewPointerPhi,
9654       State.Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
9655       InductionLoc);
9656   // Add induction update using an incorrect block temporarily. The phi node
9657   // will be fixed after VPlan execution. Note that at this point the latch
9658   // block cannot be used, as it does not exist yet.
9659   // TODO: Model increment value in VPlan, by turning the recipe into a
9660   // multi-def and a subclass of VPHeaderPHIRecipe.
9661   NewPointerPhi->addIncoming(InductionGEP, VectorPH);
9662 
9663   // Create UF many actual address geps that use the pointer
9664   // phi as base and a vectorized version of the step value
9665   // (<step*0, ..., step*N>) as offset.
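  // Schematically, for VF = 4 and UF = 2 this emits:
  //   part 0: gep %pointer.phi, (<0,1,2,3> * step)
  //   part 1: gep %pointer.phi, (<4,5,6,7> * step)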
9666   for (unsigned Part = 0; Part < State.UF; ++Part) {
9667     Type *VecPhiType = VectorType::get(PhiType, State.VF);
9668     Value *StartOffsetScalar =
9669         State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
9670     Value *StartOffset =
9671         State.Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
    // Add a step vector of consecutive numbers from zero to VF - 1.
9673     StartOffset = State.Builder.CreateAdd(
9674         StartOffset, State.Builder.CreateStepVector(VecPhiType));
9675 
9676     Value *GEP = State.Builder.CreateGEP(
9677         IndDesc.getElementType(), NewPointerPhi,
9678         State.Builder.CreateMul(
9679             StartOffset,
9680             State.Builder.CreateVectorSplat(State.VF, ScalarStepValue),
9681             "vector.gep"));
9682     State.set(this, GEP, Part);
9683   }
9684 }
9685 
9686 void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
9687   assert(!State.Instance && "VPScalarIVStepsRecipe being replicated.");
9688 
9689   // Fast-math-flags propagate from the original induction instruction.
9690   IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
9691   if (IndDesc.getInductionBinOp() &&
9692       isa<FPMathOperator>(IndDesc.getInductionBinOp()))
9693     State.Builder.setFastMathFlags(
9694         IndDesc.getInductionBinOp()->getFastMathFlags());
9695 
9696   Value *Step = State.get(getStepValue(), VPIteration(0, 0));
9697   auto CreateScalarIV = [&](Value *&Step) -> Value * {
9698     Value *ScalarIV = State.get(getCanonicalIV(), VPIteration(0, 0));
9699     auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0);
9700     if (!isCanonical() || CanonicalIV->getType() != Ty) {
9701       ScalarIV =
9702           Ty->isIntegerTy()
9703               ? State.Builder.CreateSExtOrTrunc(ScalarIV, Ty)
9704               : State.Builder.CreateCast(Instruction::SIToFP, ScalarIV, Ty);
9705       ScalarIV = emitTransformedIndex(State.Builder, ScalarIV,
9706                                       getStartValue()->getLiveInIRValue(), Step,
9707                                       IndDesc);
9708       ScalarIV->setName("offset.idx");
9709     }
9710     if (TruncToTy) {
9711       assert(Step->getType()->isIntegerTy() &&
9712              "Truncation requires an integer step");
9713       ScalarIV = State.Builder.CreateTrunc(ScalarIV, TruncToTy);
9714       Step = State.Builder.CreateTrunc(Step, TruncToTy);
9715     }
9716     return ScalarIV;
9717   };
9718 
9719   Value *ScalarIV = CreateScalarIV(Step);
9720   if (State.VF.isVector()) {
9721     buildScalarSteps(ScalarIV, Step, IndDesc, this, State);
9722     return;
9723   }
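  // E.g., with VF = 1 and UF = 4 the parts hold iv, iv + step, iv + 2 * step
  // and iv + 3 * step, respectively.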
9724 
9725   for (unsigned Part = 0; Part < State.UF; ++Part) {
9726     assert(!State.VF.isScalable() && "scalable vectors not yet supported.");
9727     Value *EntryPart;
9728     if (Step->getType()->isFloatingPointTy()) {
9729       Value *StartIdx =
9730           getRuntimeVFAsFloat(State.Builder, Step->getType(), State.VF * Part);
9731       // Floating-point operations inherit FMF via the builder's flags.
9732       Value *MulOp = State.Builder.CreateFMul(StartIdx, Step);
9733       EntryPart = State.Builder.CreateBinOp(IndDesc.getInductionOpcode(),
9734                                             ScalarIV, MulOp);
9735     } else {
9736       Value *StartIdx =
9737           getRuntimeVF(State.Builder, Step->getType(), State.VF * Part);
9738       EntryPart = State.Builder.CreateAdd(
9739           ScalarIV, State.Builder.CreateMul(StartIdx, Step), "induction");
9740     }
9741     State.set(this, EntryPart, Part);
9742   }
9743 }
9744 
9745 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9746   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
9747                                  State);
9748 }
9749 
9750 void VPBlendRecipe::execute(VPTransformState &State) {
9751   State.ILV->setDebugLocFromInst(Phi, &State.Builder);
9752   // We know that all PHIs in non-header blocks are converted into
9753   // selects, so we don't have to worry about the insertion order and we
9754   // can just use the builder.
9755   // At this point we generate the predication tree. There may be
9756   // duplications since this is a simple recursive scan, but future
9757   // optimizations will clean it up.
9758 
9759   unsigned NumIncoming = getNumIncomingValues();
9760 
9761   // Generate a sequence of selects of the form:
9762   // SELECT(Mask3, In3,
9763   //        SELECT(Mask2, In2,
9764   //               SELECT(Mask1, In1,
9765   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // are essentially undef and are taken from In0.
9768   InnerLoopVectorizer::VectorParts Entry(State.UF);
9769   for (unsigned In = 0; In < NumIncoming; ++In) {
9770     for (unsigned Part = 0; Part < State.UF; ++Part) {
9771       // We might have single edge PHIs (blocks) - use an identity
9772       // 'select' for the first PHI operand.
9773       Value *In0 = State.get(getIncomingValue(In), Part);
9774       if (In == 0)
9775         Entry[Part] = In0; // Initialize with the first incoming value.
9776       else {
9777         // Select between the current value and the previous incoming edge
9778         // based on the incoming mask.
9779         Value *Cond = State.get(getMask(In), Part);
9780         Entry[Part] =
9781             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9782       }
9783     }
9784   }
9785   for (unsigned Part = 0; Part < State.UF; ++Part)
9786     State.set(this, Entry[Part], Part);
9787 }
9788 
9789 void VPInterleaveRecipe::execute(VPTransformState &State) {
9790   assert(!State.Instance && "Interleave group being replicated.");
9791   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9792                                       getStoredValues(), getMask());
9793 }
9794 
9795 void VPReductionRecipe::execute(VPTransformState &State) {
9796   assert(!State.Instance && "Reduction being replicated.");
9797   Value *PrevInChain = State.get(getChainOp(), 0);
9798   RecurKind Kind = RdxDesc->getRecurrenceKind();
9799   bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9800   // Propagate the fast-math flags carried by the underlying instruction.
9801   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
9802   State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
9803   for (unsigned Part = 0; Part < State.UF; ++Part) {
9804     Value *NewVecOp = State.get(getVecOp(), Part);
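    // For a conditional reduction, replace masked-off lanes with the
    // recurrence identity (e.g. zero for an integer add), so they do not
    // affect the reduced value.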
9805     if (VPValue *Cond = getCondOp()) {
9806       Value *NewCond = State.get(Cond, Part);
9807       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9808       Value *Iden = RdxDesc->getRecurrenceIdentity(
9809           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9810       Value *IdenVec =
9811           State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
9812       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9813       NewVecOp = Select;
9814     }
9815     Value *NewRed;
9816     Value *NextInChain;
9817     if (IsOrdered) {
9818       if (State.VF.isVector())
9819         NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9820                                         PrevInChain);
9821       else
9822         NewRed = State.Builder.CreateBinOp(
9823             (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
9824             NewVecOp);
9825       PrevInChain = NewRed;
9826     } else {
9827       PrevInChain = State.get(getChainOp(), Part);
9828       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9829     }
9830     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9831       NextInChain =
9832           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9833                          NewRed, PrevInChain);
9834     } else if (IsOrdered)
9835       NextInChain = NewRed;
9836     else
9837       NextInChain = State.Builder.CreateBinOp(
9838           (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
9839           PrevInChain);
9840     State.set(this, NextInChain, Part);
9841   }
9842 }
9843 
9844 void VPReplicateRecipe::execute(VPTransformState &State) {
9845   if (State.Instance) { // Generate a single instance.
9846     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9847     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
9848                                     IsPredicated, State);
9849     // Insert scalar instance packing it into a vector.
9850     if (AlsoPack && State.VF.isVector()) {
9851       // If we're constructing lane 0, initialize to start from poison.
9852       if (State.Instance->Lane.isFirstLane()) {
9853         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9854         Value *Poison = PoisonValue::get(
9855             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9856         State.set(this, Poison, State.Instance->Part);
9857       }
9858       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9859     }
9860     return;
9861   }
9862 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case only the first lane of each UF part
  // is generated.
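  // E.g., with VF = 4 and UF = 2 a scalarized instruction is replicated 8
  // times, but a uniform one only twice (lane 0 of each part).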
9866   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9867   assert((!State.VF.isScalable() || IsUniform) &&
9868          "Can't scalarize a scalable vector");
9869   for (unsigned Part = 0; Part < State.UF; ++Part)
9870     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9871       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this,
9872                                       VPIteration(Part, Lane), IsPredicated,
9873                                       State);
9874 }
9875 
9876 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9877   assert(State.Instance && "Branch on Mask works only on single instance.");
9878 
9879   unsigned Part = State.Instance->Part;
9880   unsigned Lane = State.Instance->Lane.getKnownLane();
9881 
9882   Value *ConditionBit = nullptr;
9883   VPValue *BlockInMask = getMask();
9884   if (BlockInMask) {
9885     ConditionBit = State.get(BlockInMask, Part);
9886     if (ConditionBit->getType()->isVectorTy())
9887       ConditionBit = State.Builder.CreateExtractElement(
9888           ConditionBit, State.Builder.getInt32(Lane));
9889   } else // Block in mask is all-one.
9890     ConditionBit = State.Builder.getTrue();
9891 
9892   // Replace the temporary unreachable terminator with a new conditional branch,
9893   // whose two destinations will be set later when they are created.
9894   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9895   assert(isa<UnreachableInst>(CurrentTerminator) &&
9896          "Expected to replace unreachable terminator with conditional branch.");
9897   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9898   CondBr->setSuccessor(0, nullptr);
9899   ReplaceInstWithInst(CurrentTerminator, CondBr);
9900 }
9901 
9902 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9903   assert(State.Instance && "Predicated instruction PHI works per instance.");
9904   Instruction *ScalarPredInst =
9905       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9906   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9907   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9908   assert(PredicatingBB && "Predicated block has no single predecessor.");
9909   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9910          "operand must be VPReplicateRecipe");
9911 
9912   // By current pack/unpack logic we need to generate only a single phi node: if
9913   // a vector value for the predicated instruction exists at this point it means
9914   // the instruction has vector users only, and a phi for the vector value is
9915   // needed. In this case the recipe of the predicated instruction is marked to
9916   // also do that packing, thereby "hoisting" the insert-element sequence.
9917   // Otherwise, a phi node for the scalar value is needed.
9918   unsigned Part = State.Instance->Part;
9919   if (State.hasVectorValue(getOperand(0), Part)) {
9920     Value *VectorValue = State.get(getOperand(0), Part);
9921     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9922     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9923     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9924     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9925     if (State.hasVectorValue(this, Part))
9926       State.reset(this, VPhi, Part);
9927     else
9928       State.set(this, VPhi, Part);
9929     // NOTE: Currently we need to update the value of the operand, so the next
9930     // predicated iteration inserts its generated value in the correct vector.
9931     State.reset(getOperand(0), VPhi, Part);
9932   } else {
9933     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9934     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9935     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9936                      PredicatingBB);
9937     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9938     if (State.hasScalarValue(this, *State.Instance))
9939       State.reset(this, Phi, *State.Instance);
9940     else
9941       State.set(this, Phi, *State.Instance);
9942     // NOTE: Currently we need to update the value of the operand, so the next
9943     // predicated iteration inserts its generated value in the correct vector.
9944     State.reset(getOperand(0), Phi, *State.Instance);
9945   }
9946 }
9947 
9948 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9949   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9950 
  // The ingredient is either a wide load or a wide store; identify which.
9952   LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
9953   StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);
9954 
9955   assert((LI || SI) && "Invalid Load/Store instruction");
9956   assert((!SI || StoredValue) && "No stored value provided for widened store");
9957   assert((!LI || !StoredValue) && "Stored value provided for widened load");
9958 
9959   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
9960 
9961   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
9962   const Align Alignment = getLoadStoreAlignment(&Ingredient);
9963   bool CreateGatherScatter = !Consecutive;
9964 
9965   auto &Builder = State.Builder;
9966   InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
9967   bool isMaskRequired = getMask();
9968   if (isMaskRequired)
9969     for (unsigned Part = 0; Part < State.UF; ++Part)
9970       BlockInMaskParts[Part] = State.get(getMask(), Part);
9971 
9972   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
9973     // Calculate the pointer for the specific unroll-part.
9974     GetElementPtrInst *PartPtr = nullptr;
9975 
9976     bool InBounds = false;
9977     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
9978       InBounds = gep->isInBounds();
9979     if (Reverse) {
9980       // If the address is consecutive but reversed, then the
9981       // wide store needs to start at the last vector element.
      // RunTimeVF = VScale * VF.getKnownMinValue()
      // For fixed-width vectors VScale is 1, so RunTimeVF = VF.getKnownMinValue().
9984       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
9985       // NumElt = -Part * RunTimeVF
9986       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
9987       // LastLane = 1 - RunTimeVF
9988       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
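      // E.g., for a fixed VF = 4 and Part = 1: NumElt = -4 and LastLane = -3,
      // so the part pointer ends up VF * Part + VF - 1 = 7 elements below Ptr.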
9989       PartPtr =
9990           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
9991       PartPtr->setIsInBounds(InBounds);
9992       PartPtr = cast<GetElementPtrInst>(
9993           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
9994       PartPtr->setIsInBounds(InBounds);
9995       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
9996         BlockInMaskParts[Part] =
9997             Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse");
9998     } else {
9999       Value *Increment =
10000           createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part);
10001       PartPtr = cast<GetElementPtrInst>(
10002           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
10003       PartPtr->setIsInBounds(InBounds);
10004     }
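    // Consecutive parts are thus a runtime VF's worth of elements apart:
    // part P accesses the VF elements starting at Ptr + P * RunTimeVF, or
    // the VF elements ending at Ptr - P * RunTimeVF when reversed.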
10005 
10006     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
10007     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
10008   };
10009 
  // Handle stores.
10011   if (SI) {
10012     State.ILV->setDebugLocFromInst(SI);
10013 
10014     for (unsigned Part = 0; Part < State.UF; ++Part) {
10015       Instruction *NewSI = nullptr;
10016       Value *StoredVal = State.get(StoredValue, Part);
10017       if (CreateGatherScatter) {
10018         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
10019         Value *VectorGep = State.get(getAddr(), Part);
10020         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
10021                                             MaskPart);
10022       } else {
10023         if (Reverse) {
10024           // If we store to reverse consecutive memory locations, then we need
10025           // to reverse the order of elements in the stored value.
10026           StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
10027           // We don't want to update the value in the map as it might be used in
10028           // another expression. So don't call resetVectorValue(StoredVal).
10029         }
10030         auto *VecPtr =
10031             CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
10032         if (isMaskRequired)
10033           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
10034                                             BlockInMaskParts[Part]);
10035         else
10036           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
10037       }
10038       State.ILV->addMetadata(NewSI, SI);
10039     }
10040     return;
10041   }
10042 
10043   // Handle loads.
10044   assert(LI && "Must have a load instruction");
10045   State.ILV->setDebugLocFromInst(LI);
10046   for (unsigned Part = 0; Part < State.UF; ++Part) {
10047     Value *NewLI;
10048     if (CreateGatherScatter) {
10049       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
10050       Value *VectorGep = State.get(getAddr(), Part);
10051       NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
10052                                          nullptr, "wide.masked.gather");
10053       State.ILV->addMetadata(NewLI, LI);
10054     } else {
10055       auto *VecPtr =
10056           CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
10057       if (isMaskRequired)
10058         NewLI = Builder.CreateMaskedLoad(
10059             DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
10060             PoisonValue::get(DataTy), "wide.masked.load");
10061       else
10062         NewLI =
10063             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
10064 
      // Add the metadata to the load itself, but record the reverse shuffle
      // as the vector value when the access is reversed.
10066       State.ILV->addMetadata(NewLI, LI);
10067       if (Reverse)
10068         NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
10069     }
10070 
10071     State.set(this, NewLI, Part);
10072   }
10073 }
10074 
// Determine how to lower the scalar epilogue, which depends on 1) optimizing
// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
// predication, and 4) a TTI hook that analyzes whether the loop is suitable
// for predication.
10079 static ScalarEpilogueLowering getScalarEpilogueLowering(
10080     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
10081     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
10082     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
10083     LoopVectorizationLegality &LVL) {
10084   // 1) OptSize takes precedence over all other options, i.e. if this is set,
10085   // don't look at hints or options, and don't request a scalar epilogue.
10086   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
10087   // LoopAccessInfo (due to code dependency and not being able to reliably get
10088   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
10089   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
10090   // versioning when the vectorization is forced, unlike hasOptSize. So revert
10091   // back to the old way and vectorize with versioning when forced. See D81345.)
10092   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
10093                                                       PGSOQueryType::IRPass) &&
10094                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
10095     return CM_ScalarEpilogueNotAllowedOptSize;
10096 
10097   // 2) If set, obey the directives
10098   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
10099     switch (PreferPredicateOverEpilogue) {
10100     case PreferPredicateTy::ScalarEpilogue:
10101       return CM_ScalarEpilogueAllowed;
10102     case PreferPredicateTy::PredicateElseScalarEpilogue:
10103       return CM_ScalarEpilogueNotNeededUsePredicate;
10104     case PreferPredicateTy::PredicateOrDontVectorize:
10105       return CM_ScalarEpilogueNotAllowedUsePredicate;
    }
10107   }
10108 
10109   // 3) If set, obey the hints
10110   switch (Hints.getPredicate()) {
10111   case LoopVectorizeHints::FK_Enabled:
10112     return CM_ScalarEpilogueNotNeededUsePredicate;
10113   case LoopVectorizeHints::FK_Disabled:
10114     return CM_ScalarEpilogueAllowed;
  }
10116 
10117   // 4) if the TTI hook indicates this is profitable, request predication.
10118   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
10119                                        LVL.getLAI()))
10120     return CM_ScalarEpilogueNotNeededUsePredicate;
10121 
10122   return CM_ScalarEpilogueAllowed;
10123 }
10124 
10125 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If values have been set for this Def, return the one relevant for \p Part.
10127   if (hasVectorValue(Def, Part))
10128     return Data.PerPartOutput[Def][Part];
10129 
10130   if (!hasScalarValue(Def, {Part, 0})) {
10131     Value *IRV = Def->getLiveInIRValue();
10132     Value *B = ILV->getBroadcastInstrs(IRV);
10133     set(Def, B, Part);
10134     return B;
10135   }
10136 
10137   Value *ScalarValue = get(Def, {Part, 0});
10138   // If we aren't vectorizing, we can just copy the scalar map values over
10139   // to the vector map.
10140   if (VF.isScalar()) {
10141     set(Def, ScalarValue, Part);
10142     return ScalarValue;
10143   }
10144 
10145   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
10146   bool IsUniform = RepR && RepR->isUniform();
10147 
10148   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
10149   // Check if there is a scalar value for the selected lane.
10150   if (!hasScalarValue(Def, {Part, LastLane})) {
10151     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
10152     assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) ||
10153             isa<VPScalarIVStepsRecipe>(Def->getDef())) &&
10154            "unexpected recipe found to be invariant");
10155     IsUniform = true;
10156     LastLane = 0;
10157   }
10158 
10159   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
10160   // Set the insert point after the last scalarized instruction or after the
10161   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
10162   // will directly follow the scalar definitions.
10163   auto OldIP = Builder.saveIP();
10164   auto NewIP =
10165       isa<PHINode>(LastInst)
10166           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
10167           : std::next(BasicBlock::iterator(LastInst));
10168   Builder.SetInsertPoint(&*NewIP);
10169 
10170   // However, if we are vectorizing, we need to construct the vector values.
10171   // If the value is known to be uniform after vectorization, we can just
10172   // broadcast the scalar value corresponding to lane zero for each unroll
10173   // iteration. Otherwise, we construct the vector values using
10174   // insertelement instructions. Since the resulting vectors are stored in
10175   // State, we will only generate the insertelements once.
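  // E.g., for VF = 4 a non-uniform Def is packed by a chain of four
  // insertelement instructions that build up <s0, s1, s2, s3>.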
10176   Value *VectorValue = nullptr;
10177   if (IsUniform) {
10178     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
10179     set(Def, VectorValue, Part);
10180   } else {
10181     // Initialize packing with insertelements to start from undef.
10182     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
10183     Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
10184     set(Def, Undef, Part);
10185     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
10186       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
10187     VectorValue = get(Def, Part);
10188   }
10189   Builder.restoreIP(OldIP);
10190   return VectorValue;
10191 }
10192 
10193 // Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
10195 // VPlan-to-VPlan transformations from the very beginning without modifying the
10196 // input LLVM IR.
10197 static bool processLoopInVPlanNativePath(
10198     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
10199     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
10200     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
10201     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
10202     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
10203     LoopVectorizationRequirements &Requirements) {
10204 
10205   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
10206     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
10207     return false;
10208   }
10209   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
10210   Function *F = L->getHeader()->getParent();
10211   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
10212 
10213   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10214       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
10215 
10216   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
10217                                 &Hints, IAI);
10218   // Use the planner for outer loop vectorization.
10219   // TODO: CM is not used at this point inside the planner. Turn CM into an
10220   // optional argument if we don't need it in the future.
10221   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
10222                                Requirements, ORE);
10223 
10224   // Get user vectorization factor.
10225   ElementCount UserVF = Hints.getWidth();
10226 
10227   CM.collectElementTypesForWidening();
10228 
10229   // Plan how to best vectorize, return the best VF and its cost.
10230   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
10231 
10232   // If we are stress testing VPlan builds, do not attempt to generate vector
10233   // code. Masked vector code generation support will follow soon.
10234   // Also, do not attempt to vectorize if no vector code will be produced.
10235   if (VPlanBuildStressTest || EnableVPlanPredication ||
10236       VectorizationFactor::Disabled() == VF)
10237     return false;
10238 
10239   VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10240 
10241   {
10242     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10243                              F->getParent()->getDataLayout());
10244     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
10245                            &CM, BFI, PSI, Checks);
10246     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
10247                       << L->getHeader()->getParent()->getName() << "\"\n");
10248     LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
10249   }
10250 
10251   // Mark the loop as already vectorized to avoid vectorizing again.
10252   Hints.setAlreadyVectorized();
10253   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10254   return true;
10255 }
10256 
10257 // Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated with double precision there
10259 // will be a performance penalty from the conversion overhead and the change in
10260 // the vector width.
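// For example, 'f[i] = g[i] + d' with float arrays f and g and a double
// addend d fpext's g[i], adds in double, and fptrunc's the result back to
// float; the fpext reached while walking up from the store triggers the
// remark.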
10261 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
10262   SmallVector<Instruction *, 4> Worklist;
10263   for (BasicBlock *BB : L->getBlocks()) {
10264     for (Instruction &Inst : *BB) {
10265       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
10266         if (S->getValueOperand()->getType()->isFloatTy())
10267           Worklist.push_back(S);
10268       }
10269     }
10270   }
10271 
  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
10274   SmallPtrSet<const Instruction *, 4> Visited;
10275   SmallPtrSet<const Instruction *, 4> EmittedRemark;
10276   while (!Worklist.empty()) {
10277     auto *I = Worklist.pop_back_val();
10278     if (!L->contains(I))
10279       continue;
10280     if (!Visited.insert(I).second)
10281       continue;
10282 
10283     // Emit a remark if the floating point store required a floating
10284     // point conversion.
10285     // TODO: More work could be done to identify the root cause such as a
10286     // constant or a function return type and point the user to it.
10287     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
10288       ORE->emit([&]() {
10289         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
10290                                           I->getDebugLoc(), L->getHeader())
10291                << "floating point conversion changes vector width. "
10292                << "Mixed floating point precision requires an up/down "
10293                << "cast that will negatively impact performance.";
10294       });
10295 
10296     for (Use &Op : I->operands())
10297       if (auto *OpI = dyn_cast<Instruction>(Op))
10298         Worklist.push_back(OpI);
10299   }
10300 }
10301 
10302 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
10303     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10304                                !EnableLoopInterleaving),
10305       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10306                               !EnableLoopVectorization) {}
10307 
10308 bool LoopVectorizePass::processLoop(Loop *L) {
10309   assert((EnableVPlanNativePath || L->isInnermost()) &&
10310          "VPlan-native path is not enabled. Only process inner loops.");
10311 
10312 #ifndef NDEBUG
10313   const std::string DebugLocStr = getDebugLocString(L);
10314 #endif /* NDEBUG */
10315 
10316   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
10317                     << L->getHeader()->getParent()->getName() << "' from "
10318                     << DebugLocStr << "\n");
10319 
10320   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);
10321 
10322   LLVM_DEBUG(
10323       dbgs() << "LV: Loop hints:"
10324              << " force="
10325              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
10326                      ? "disabled"
10327                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
10328                             ? "enabled"
10329                             : "?"))
10330              << " width=" << Hints.getWidth()
10331              << " interleave=" << Hints.getInterleave() << "\n");
10332 
10333   // Function containing loop
10334   Function *F = L->getHeader()->getParent();
10335 
10336   // Looking at the diagnostic output is the only way to determine if a loop
10337   // was vectorized (other than looking at the IR or machine code), so it
10338   // is important to generate an optimization remark for each loop. Most of
10339   // these messages are generated as OptimizationRemarkAnalysis. Remarks
10340   // generated as OptimizationRemark and OptimizationRemarkMissed are
10341   // less verbose reporting vectorized loops and unvectorized loops that may
10342   // benefit from vectorization, respectively.
10343 
10344   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
10345     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
10346     return false;
10347   }
10348 
10349   PredicatedScalarEvolution PSE(*SE, *L);
10350 
10351   // Check if it is legal to vectorize the loop.
10352   LoopVectorizationRequirements Requirements;
10353   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
10354                                 &Requirements, &Hints, DB, AC, BFI, PSI);
10355   if (!LVL.canVectorize(EnableVPlanNativePath)) {
10356     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
10357     Hints.emitRemarkWithHints();
10358     return false;
10359   }
10360 
10361   // Check the function attributes and profiles to find out if this function
10362   // should be optimized for size.
10363   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10364       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
10365 
10366   // Entrance to the VPlan-native vectorization path. Outer loops are processed
10367   // here. They may require CFG and instruction level transformations before
10368   // even evaluating whether vectorization is profitable. Since we cannot modify
10369   // the incoming IR, we need to build VPlan upfront in the vectorization
10370   // pipeline.
10371   if (!L->isInnermost())
10372     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
10373                                         ORE, BFI, PSI, Hints, Requirements);
10374 
10375   assert(L->isInnermost() && "Inner loop expected.");
10376 
10377   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
10378   // count by optimizing for size, to minimize overheads.
10379   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
10380   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
10381     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
10382                       << "This loop is worth vectorizing only if no scalar "
10383                       << "iteration overheads are incurred.");
10384     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
10385       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
10386     else {
10387       LLVM_DEBUG(dbgs() << "\n");
10388       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
10389     }
10390   }
10391 
10392   // Check the function attributes to see if implicit floats are allowed.
10393   // FIXME: This check doesn't seem possibly correct -- what if the loop is
10394   // an integer loop and the vector instructions selected are purely integer
10395   // vector instructions?
10396   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
10397     reportVectorizationFailure(
10398         "Can't vectorize when the NoImplicitFloat attribute is used",
10399         "loop not vectorized due to NoImplicitFloat attribute",
10400         "NoImplicitFloat", ORE, L);
10401     Hints.emitRemarkWithHints();
10402     return false;
10403   }
10404 
10405   // Check if the target supports potentially unsafe FP vectorization.
10406   // FIXME: Add a check for the type of safety issue (denormal, signaling)
10407   // for the target we're vectorizing for, to make sure none of the
10408   // additional fp-math flags can help.
10409   if (Hints.isPotentiallyUnsafe() &&
10410       TTI->isFPVectorizationPotentiallyUnsafe()) {
10411     reportVectorizationFailure(
10412         "Potentially unsafe FP op prevents vectorization",
10413         "loop not vectorized due to unsafe FP support.",
10414         "UnsafeFP", ORE, L);
10415     Hints.emitRemarkWithHints();
10416     return false;
10417   }
10418 
10419   bool AllowOrderedReductions;
  // If the flag is set, use that instead and override the TTI behavior.
10421   if (ForceOrderedReductions.getNumOccurrences() > 0)
10422     AllowOrderedReductions = ForceOrderedReductions;
10423   else
10424     AllowOrderedReductions = TTI->enableOrderedReductions();
10425   if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
10426     ORE->emit([&]() {
10427       auto *ExactFPMathInst = Requirements.getExactFPInst();
10428       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10429                                                  ExactFPMathInst->getDebugLoc(),
10430                                                  ExactFPMathInst->getParent())
10431              << "loop not vectorized: cannot prove it is safe to reorder "
10432                 "floating-point operations";
10433     });
10434     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10435                          "reorder floating-point operations\n");
10436     Hints.emitRemarkWithHints();
10437     return false;
10438   }
10439 
10440   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
10441   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
10442 
10443   // If an override option has been passed in for interleaved accesses, use it.
10444   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
10445     UseInterleaved = EnableInterleavedMemAccesses;
10446 
10447   // Analyze interleaved memory accesses.
  if (UseInterleaved)
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
10451 
10452   // Use the cost model.
10453   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10454                                 F, &Hints, IAI);
10455   CM.collectValuesToIgnore();
10456   CM.collectElementTypesForWidening();
10457 
10458   // Use the planner for vectorization.
10459   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
10460                                Requirements, ORE);
10461 
10462   // Get user vectorization factor and interleave count.
10463   ElementCount UserVF = Hints.getWidth();
10464   unsigned UserIC = Hints.getInterleave();
10465 
10466   // Plan how to best vectorize, return the best VF and its cost.
10467   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10468 
10469   VectorizationFactor VF = VectorizationFactor::Disabled();
10470   unsigned IC = 1;
10471 
10472   if (MaybeVF) {
10473     VF = *MaybeVF;
10474     // Select the interleave count.
10475     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10476   }
10477 
10478   // Identify the diagnostic messages that should be produced.
10479   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10480   bool VectorizeLoop = true, InterleaveLoop = true;
10481   if (VF.Width.isScalar()) {
10482     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10483     VecDiagMsg = std::make_pair(
10484         "VectorizationNotBeneficial",
10485         "the cost-model indicates that vectorization is not beneficial");
10486     VectorizeLoop = false;
10487   }
10488 
10489   if (!MaybeVF && UserIC > 1) {
10490     // Tell the user interleaving was avoided up-front, despite being explicitly
10491     // requested.
10492     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10493                          "interleaving should be avoided up front\n");
10494     IntDiagMsg = std::make_pair(
10495         "InterleavingAvoided",
10496         "Ignoring UserIC, because interleaving was avoided up front");
10497     InterleaveLoop = false;
10498   } else if (IC == 1 && UserIC <= 1) {
10499     // Tell the user interleaving is not beneficial.
10500     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10501     IntDiagMsg = std::make_pair(
10502         "InterleavingNotBeneficial",
10503         "the cost-model indicates that interleaving is not beneficial");
10504     InterleaveLoop = false;
10505     if (UserIC == 1) {
10506       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10507       IntDiagMsg.second +=
10508           " and is explicitly disabled or interleave count is set to 1";
10509     }
10510   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
10512     LLVM_DEBUG(
10513         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10514     IntDiagMsg = std::make_pair(
10515         "InterleavingBeneficialButDisabled",
10516         "the cost-model indicates that interleaving is beneficial "
10517         "but is explicitly disabled or interleave count is set to 1");
10518     InterleaveLoop = false;
10519   }
10520 
10521   // Override IC if user provided an interleave count.
10522   IC = UserIC > 0 ? UserIC : IC;
10523 
10524   // Emit diagnostic messages, if any.
10525   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10526   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
10528     ORE->emit([&]() {
10529       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10530                                       L->getStartLoc(), L->getHeader())
10531              << VecDiagMsg.second;
10532     });
10533     ORE->emit([&]() {
10534       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10535                                       L->getStartLoc(), L->getHeader())
10536              << IntDiagMsg.second;
10537     });
10538     return false;
10539   } else if (!VectorizeLoop && InterleaveLoop) {
10540     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10541     ORE->emit([&]() {
10542       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10543                                         L->getStartLoc(), L->getHeader())
10544              << VecDiagMsg.second;
10545     });
10546   } else if (VectorizeLoop && !InterleaveLoop) {
10547     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10548                       << ") in " << DebugLocStr << '\n');
10549     ORE->emit([&]() {
10550       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10551                                         L->getStartLoc(), L->getHeader())
10552              << IntDiagMsg.second;
10553     });
10554   } else if (VectorizeLoop && InterleaveLoop) {
10555     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10556                       << ") in " << DebugLocStr << '\n');
10557     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10558   }
10559 
10560   bool DisableRuntimeUnroll = false;
10561   MDNode *OrigLoopID = L->getLoopID();
10562   {
10563     // Optimistically generate runtime checks. Drop them if they turn out to not
10564     // be profitable. Limit the scope of Checks, so the cleanup happens
10565     // immediately after vector codegeneration is done.
10566     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10567                              F->getParent()->getDataLayout());
10568     if (!VF.Width.isScalar() || IC > 1)
10569       Checks.Create(L, *LVL.getLAI(), PSE.getPredicate());
10570 
10571     using namespace ore;
10572     if (!VectorizeLoop) {
10573       assert(IC > 1 && "interleave count should not be 1 or 0");
10574       // If we decided that it is not legal to vectorize the loop, then
10575       // interleave it.
10576       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10577                                  &CM, BFI, PSI, Checks);
10578 
10579       VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10580       LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);
10581 
10582       ORE->emit([&]() {
10583         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10584                                   L->getHeader())
10585                << "interleaved loop (interleaved count: "
10586                << NV("InterleaveCount", IC) << ")";
10587       });
10588     } else {
10589       // If we decided that it is *legal* to vectorize the loop, then do it.
10590 
10591       // Consider vectorizing the epilogue too if it's profitable.
10592       VectorizationFactor EpilogueVF =
10593           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10594       if (EpilogueVF.Width.isVector()) {
10595 
10596         // The first pass vectorizes the main loop and creates a scalar epilogue
10597         // to be vectorized by executing the plan (potentially with a different
10598         // factor) again shortly afterwards.
10599         EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
10600         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10601                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10602 
10603         VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
10604         LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
10605                         DT);
10606         ++LoopsVectorized;
10607 
10608         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10609         formLCSSARecursively(*L, *DT, LI, SE);
10610 
10611         // Second pass vectorizes the epilogue and adjusts the control flow
10612         // edges from the first pass.
10613         EPI.MainLoopVF = EPI.EpilogueVF;
10614         EPI.MainLoopUF = EPI.EpilogueUF;
10615         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10616                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10617                                                  Checks);
10618 
10619         VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
10620         BestEpiPlan.getVectorLoopRegion()->getEntryBasicBlock()->setName(
10621             "vec.epilog.vector.body");
10622 
        // Ensure that the start values for any VPReductionPHIRecipes are
        // updated before vectorizing the epilogue loop.
10625         VPBasicBlock *Header =
10626             BestEpiPlan.getVectorLoopRegion()->getEntryBasicBlock();
10627         for (VPRecipeBase &R : Header->phis()) {
10628           if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
10629             if (auto *Resume = MainILV.getReductionResumeValue(
10630                     ReductionPhi->getRecurrenceDescriptor())) {
10631               VPValue *StartVal = BestEpiPlan.getOrAddExternalDef(Resume);
10632               ReductionPhi->setOperand(0, StartVal);
10633             }
10634           }
10635         }
10636 
10637         LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
10638                         DT);
10639         ++LoopsEpilogueVectorized;
10640 
10641         if (!MainILV.areSafetyChecksAdded())
10642           DisableRuntimeUnroll = true;
10643       } else {
10644         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10645                                &LVL, &CM, BFI, PSI, Checks);
10646 
10647         VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10648         LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
10649         ++LoopsVectorized;
10650 
10651         // Add metadata to disable runtime unrolling a scalar loop when there
10652         // are no runtime checks about strides and memory. A scalar loop that is
10653         // rarely used is not worth unrolling.
10654         if (!LB.areSafetyChecksAdded())
10655           DisableRuntimeUnroll = true;
10656       }
10657       // Report the vectorization decision.
10658       ORE->emit([&]() {
10659         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10660                                   L->getHeader())
10661                << "vectorized loop (vectorization width: "
10662                << NV("VectorizationFactor", VF.Width)
10663                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10664       });
10665     }
10666 
10667     if (ORE->allowExtraAnalysis(LV_NAME))
10668       checkMixedPrecision(L, ORE);
10669   }
10670 
10671   Optional<MDNode *> RemainderLoopID =
10672       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10673                                       LLVMLoopVectorizeFollowupEpilogue});
10674   if (RemainderLoopID.hasValue()) {
10675     L->setLoopID(RemainderLoopID.getValue());
10676   } else {
10677     if (DisableRuntimeUnroll)
10678       AddRuntimeUnrollDisableMetaData(L);
10679 
10680     // Mark the loop as already vectorized to avoid vectorizing again.
10681     Hints.setAlreadyVectorized();
10682   }
10683 
10684   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10685   return true;
10686 }
10687 
10688 LoopVectorizeResult LoopVectorizePass::runImpl(
10689     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10690     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10691     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10692     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10693     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10694   SE = &SE_;
10695   LI = &LI_;
10696   TTI = &TTI_;
10697   DT = &DT_;
10698   BFI = &BFI_;
10699   TLI = TLI_;
10700   AA = &AA_;
10701   AC = &AC_;
10702   GetLAA = &GetLAA_;
10703   DB = &DB_;
10704   ORE = &ORE_;
10705   PSI = PSI_;
10706 
10707   // Don't attempt if
10708   // 1. the target claims to have no vector registers, and
10709   // 2. interleaving won't help ILP.
10710   //
10711   // The second condition is necessary because, even if the target has no
10712   // vector registers, loop vectorization may still enable scalar
10713   // interleaving.
10714   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10715       TTI->getMaxInterleaveFactor(1) < 2)
10716     return LoopVectorizeResult(false, false);
10717 
10718   bool Changed = false, CFGChanged = false;
10719 
10720   // The vectorizer requires loops to be in simplified form.
10721   // Since simplification may add new inner loops, it has to run before the
10722   // legality and profitability checks. This means running the loop vectorizer
// will simplify all loops, regardless of whether anything ends up being
10724   // vectorized.
10725   for (auto &L : *LI)
10726     Changed |= CFGChanged |=
10727         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10728 
10729   // Build up a worklist of inner-loops to vectorize. This is necessary as
10730   // the act of vectorizing or partially unrolling a loop creates new loops
10731   // and can invalidate iterators across the loops.
10732   SmallVector<Loop *, 8> Worklist;
10733 
10734   for (Loop *L : *LI)
10735     collectSupportedLoops(*L, LI, ORE, Worklist);
10736 
10737   LoopsAnalyzed += Worklist.size();
10738 
10739   // Now walk the identified inner loops.
10740   while (!Worklist.empty()) {
10741     Loop *L = Worklist.pop_back_val();
10742 
10743     // For the inner loops we actually process, form LCSSA to simplify the
10744     // transform.
10745     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10746 
10747     Changed |= CFGChanged |= processLoop(L);
10748   }
10749 
  // All loop nests in the function have been processed; report the result.
10751   return LoopVectorizeResult(Changed, CFGChanged);
10752 }
10753 
10754 PreservedAnalyses LoopVectorizePass::run(Function &F,
10755                                          FunctionAnalysisManager &AM) {
  auto &LI = AM.getResult<LoopAnalysis>(F);
  // There are no loops in the function. Return before computing other
  // expensive analyses.
  if (LI.empty())
    return PreservedAnalyses::all();
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
                                      TLI, TTI, nullptr, nullptr, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }

  if (Result.MadeCFGChange) {
    // Making CFG changes likely means a loop got vectorized. Indicate that
    // extra simplification passes should be run.
    // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
    // be run if runtime checks have been added.
    AM.getResult<ShouldRunExtraVectorPasses>(F);
    PA.preserve<ShouldRunExtraVectorPasses>();
  } else {
    PA.preserveSet<CFGAnalyses>();
  }
  return PA;
10807 }
10808 
10809 void LoopVectorizePass::printPipeline(
10810     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10811   static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10812       OS, MapClassName2PassName);
10813 
10814   OS << "<";
10815   OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10816   OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10817   OS << ">";
10818 }
10819