//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
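//
// For example (an illustrative sketch, not actual pass output), a loop like
//   for (i = 0; i < n; ++i) A[i] = B[i] + 1;
// becomes, with a vectorization factor (VF) of 4, a loop that processes four
// elements per iteration:
//   for (i = 0; i + 4 <= n; i += 4) A[i..i+3] = B[i..i+3] + <1, 1, 1, 1>;
// and any remaining iterations are handled by a scalar epilogue loop.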
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
//  Data for SIMD.
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired, that predication is preferred, and this lists all options. I.e.,
// the vectorizer will try to fold the tail-loop (epilogue) into the vector
// body and predicate the instructions accordingly. If tail-folding fails,
// there are different fallback strategies depending on these values:
namespace PreferPredicateTy {
  enum Option {
    ScalarEpilogue = 0,
    PredicateElseScalarEpilogue,
    PredicateOrDontVectorize
  };
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting the vectorization factor, "
             "which will be determined by the smallest type in the loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
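/// For example (illustrative), a group that accesses only members {0, 2} of a
/// 3-member interleave group leaves member 1 as a gap whose lanes must be
/// masked away from the wide memory operation.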
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

cl::opt<bool> EnableStrictReductions(
    "enable-strict-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns the type of loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
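  // For example, on typical x86-64 data layouts x86_fp80 has a type size of
  // 80 bits but an alloc size of 128 bits, so an array of x86_fp80 values is
  // not bitcast compatible with the corresponding vector type.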
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
///       we always assume predicated blocks have a 50% chance of executing.
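///
/// For example, the cost model divides a predicated block's cost by this
/// value (2) to account for its assumed 1-in-2 execution probability.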
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the profile
    // of the original loop header may change as the transformation happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
                unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
                           VPWidenPHIRecipe *PhiR, VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive. Uses the VPValue operands from \p Operands instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
                             VPValue *Def, VPValue *CastDef,
                             VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi, VPTransformState &State);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(VPWidenPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
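  ///
  /// For example (illustrative), an i32 computation whose result is only ever
  /// used truncated to i8 can be narrowed and performed on <VF x i8> vectors.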
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// This function adds
  /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
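  /// For example (illustrative), with StartIdx == 0, Step == S and VF == 4,
  /// this produces Val + <0, S, 2 * S, 3 * S>.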
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                               Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPValue *CastDef, VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPValue *CastDef,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
  /// latter case \p EntryVal is a TruncInst and we must not record anything for
  /// that IV, but it's error-prone to expect callers of this routine to care
  /// about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(
      const InductionDescriptor &ID, const Instruction *EntryVal,
      Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
      unsigned Part, unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
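  /// For example, <a, b, c, d> becomes <d, c, b, a>.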
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
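  /// For example (illustrative), an integer IV with StartValue 10 and
  /// StepValue 3 maps Index 4 to 10 + 4 * 3 = 22.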
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration count
  /// in the scalar epilogue, from where the vectorized loop left off (given by
  /// \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and return
  /// the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks.  Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata).  Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart() {}
  virtual void printDebugTracesAtEnd() {}

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning.  It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The (unique) ExitBlock of the scalar loop.  Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
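  /// For example, a trip count of 103 with VF * UF == 16 yields a vector trip
  /// count of 96, leaving 7 iterations for the scalar epilogue.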
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided size
  // optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                       Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
                                unsigned EUF)
      : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
        EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue.  This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
/// RemarkName is the identifier for the remark.  If \p I is passed it is an
/// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
/// the location of the remark.  \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis
createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                 Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

/// Return a value for Step multiplied by VF.
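/// For example (illustrative), Step == 2 with a fixed VF of 4 yields the
/// constant 8, while a scalable VF of <vscale x 4> yields 8 * vscale.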
static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
  assert(isa<ConstantInt>(Step) && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(
      Step->getType(),
      cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

namespace llvm {

/// Return the runtime value for VF.
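/// For a fixed VF this is a constant (e.g. 4 for VF == 4); for a scalable VF
/// it is vscale times the known minimum (e.g. 4 * vscale for <vscale x 4>).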
1118 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
1119   Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
1120   return VF.isScalable() ? B.CreateVScale(EC) : EC;
1121 }
1122 
1123 void reportVectorizationFailure(const StringRef DebugMsg,
1124                                 const StringRef OREMsg, const StringRef ORETag,
1125                                 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1126                                 Instruction *I) {
1127   LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
1128   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1129   ORE->emit(
1130       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1131       << "loop not vectorized: " << OREMsg);
1132 }
1133 
1134 void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
1135                              OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1136                              Instruction *I) {
1137   LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
1138   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1139   ORE->emit(
1140       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1141       << Msg);
1142 }
1143 
1144 } // end namespace llvm
1145 
1146 #ifndef NDEBUG
1147 /// \return string containing a file name and a line # for the given loop.
1148 static std::string getDebugLocString(const Loop *L) {
1149   std::string Result;
1150   if (L) {
1151     raw_string_ostream OS(Result);
1152     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1153       LoopDbgLoc.print(OS);
1154     else
1155       // Just print the module name.
1156       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1157     OS.flush();
1158   }
1159   return Result;
1160 }
1161 #endif
1162 
1163 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1164                                          const Instruction *Orig) {
1165   // If the loop was versioned with memchecks, add the corresponding no-alias
1166   // metadata.
1167   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1168     LVer->annotateInstWithNoAlias(To, Orig);
1169 }
1170 
1171 void InnerLoopVectorizer::addMetadata(Instruction *To,
1172                                       Instruction *From) {
1173   propagateMetadata(To, From);
1174   addNewMetadata(To, From);
1175 }
1176 
1177 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1178                                       Instruction *From) {
1179   for (Value *V : To) {
1180     if (Instruction *I = dyn_cast<Instruction>(V))
1181       addMetadata(I, From);
1182   }
1183 }
1184 
1185 namespace llvm {
1186 
// Hints about how the scalar epilogue loop should be lowered, used by the
// loop-vectorization cost model.
1189 enum ScalarEpilogueLowering {
1190 
1191   // The default: allowing scalar epilogues.
1192   CM_ScalarEpilogueAllowed,
1193 
1194   // Vectorization with OptForSize: don't allow epilogues.
1195   CM_ScalarEpilogueNotAllowedOptSize,
1196 
  // A special case of vectorization with OptForSize: loops with a very small
1198   // trip count are considered for vectorization under OptForSize, thereby
1199   // making sure the cost of their loop body is dominant, free of runtime
1200   // guards and scalar iteration overheads.
1201   CM_ScalarEpilogueNotAllowedLowTripLoop,
1202 
1203   // Loop hint predicate indicating an epilogue is undesired.
1204   CM_ScalarEpilogueNotNeededUsePredicate,
1205 
  // Directive indicating we must either tail fold or not vectorize.
1207   CM_ScalarEpilogueNotAllowedUsePredicate
1208 };
1209 
1210 /// LoopVectorizationCostModel - estimates the expected speedups due to
1211 /// vectorization.
1212 /// In many cases vectorization is not profitable. This can happen because of
1213 /// a number of reasons. In this class we mainly attempt to predict the
1214 /// expected speedup/slowdowns due to the supported instruction set. We use the
1215 /// TargetTransformInfo to query the different backends for the cost of
1216 /// different operations.
1217 class LoopVectorizationCostModel {
1218 public:
1219   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1220                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1221                              LoopVectorizationLegality *Legal,
1222                              const TargetTransformInfo &TTI,
1223                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1224                              AssumptionCache *AC,
1225                              OptimizationRemarkEmitter *ORE, const Function *F,
1226                              const LoopVectorizeHints *Hints,
1227                              InterleavedAccessInfo &IAI)
1228       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1229         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1230         Hints(Hints), InterleaveInfo(IAI) {}
1231 
1232   /// \return An upper bound for the vectorization factor, or None if
1233   /// vectorization and interleaving should be avoided up front.
1234   Optional<ElementCount> computeMaxVF(ElementCount UserVF, unsigned UserIC);
1235 
1236   /// \return True if runtime checks are required for vectorization, and false
1237   /// otherwise.
1238   bool runtimeChecksRequired();
1239 
1240   /// \return The most profitable vectorization factor and the cost of that VF.
1241   /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
1242   /// then this vectorization factor will be selected if vectorization is
1243   /// possible.
1244   VectorizationFactor selectVectorizationFactor(ElementCount MaxVF);
1245   VectorizationFactor
1246   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1247                                     const LoopVectorizationPlanner &LVP);
1248 
  /// Set up cost-based decisions for user vectorization factor.
1250   void selectUserVectorizationFactor(ElementCount UserVF) {
1251     collectUniformsAndScalars(UserVF);
1252     collectInstsToScalarize(UserVF);
1253   }
1254 
1255   /// \return The size (in bits) of the smallest and widest types in the code
1256   /// that needs to be vectorized. We ignore values that remain scalar such as
1257   /// 64 bit loop indices.
1258   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1259 
1260   /// \return The desired interleave count.
1261   /// If interleave count has been specified by metadata it will be returned.
1262   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1263   /// are the selected vectorization factor and the cost of the selected VF.
1264   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1265 
  /// A memory access instruction may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
1273   void setCostBasedWideningDecision(ElementCount VF);
1274 
1275   /// A struct that represents some properties of the register usage
1276   /// of a loop.
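  /// For example (illustrative only, register class IDs are target-defined):
  /// a loop keeping two invariant pointers in scalar registers and up to
  /// three vector values live at once might yield
  /// LoopInvariantRegs = {ScalarRC: 2} and MaxLocalUsers = {VectorRC: 3}.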
1277   struct RegisterUsage {
1278     /// Holds the number of loop invariant values that are used in the loop.
1279     /// The key is ClassID of target-provided register class.
1280     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1281     /// Holds the maximum number of concurrent live intervals in the loop.
1282     /// The key is ClassID of target-provided register class.
1283     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1284   };
1285 
  /// \return Information about the register usage of the loop for the
  /// given vectorization factors.
1288   SmallVector<RegisterUsage, 8>
1289   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1290 
1291   /// Collect values we want to ignore in the cost model.
1292   void collectValuesToIgnore();
1293 
  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
1296   void collectInLoopReductions();
1297 
1298   /// \returns The smallest bitwidth each instruction can be represented with.
1299   /// The vector equivalents of these instructions should be truncated to this
1300   /// type.
1301   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1302     return MinBWs;
1303   }
1304 
1305   /// \returns True if it is more profitable to scalarize instruction \p I for
1306   /// vectorization factor \p VF.
1307   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1308     assert(VF.isVector() &&
1309            "Profitable to scalarize relevant only for VF > 1.");
1310 
1311     // Cost model is not run in the VPlan-native path - return conservative
1312     // result until this changes.
1313     if (EnableVPlanNativePath)
1314       return false;
1315 
1316     auto Scalars = InstsToScalarize.find(VF);
1317     assert(Scalars != InstsToScalarize.end() &&
1318            "VF not yet analyzed for scalarization profitability");
1319     return Scalars->second.find(I) != Scalars->second.end();
1320   }
1321 
1322   /// Returns true if \p I is known to be uniform after vectorization.
1323   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1324     if (VF.isScalar())
1325       return true;
1326 
1327     // Cost model is not run in the VPlan-native path - return conservative
1328     // result until this changes.
1329     if (EnableVPlanNativePath)
1330       return false;
1331 
1332     auto UniformsPerVF = Uniforms.find(VF);
1333     assert(UniformsPerVF != Uniforms.end() &&
1334            "VF not yet analyzed for uniformity");
1335     return UniformsPerVF->second.count(I);
1336   }
1337 
1338   /// Returns true if \p I is known to be scalar after vectorization.
1339   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1340     if (VF.isScalar())
1341       return true;
1342 
1343     // Cost model is not run in the VPlan-native path - return conservative
1344     // result until this changes.
1345     if (EnableVPlanNativePath)
1346       return false;
1347 
1348     auto ScalarsPerVF = Scalars.find(VF);
1349     assert(ScalarsPerVF != Scalars.end() &&
1350            "Scalar values are not calculated for VF");
1351     return ScalarsPerVF->second.count(I);
1352   }
1353 
1354   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1355   /// for vectorization factor \p VF.
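  /// For example (illustrative): if an i32 add is only ever used through its
  /// low 8 bits, MinBWs may record 8 for it, and its vector form can then be
  /// emitted on <VF x i8> instead of <VF x i32>.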
1356   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1357     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1358            !isProfitableToScalarize(I, VF) &&
1359            !isScalarAfterVectorization(I, VF);
1360   }
1361 
1362   /// Decision that was taken during cost calculation for memory instruction.
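  /// For example (illustrative): a load of a[i] is typically CM_Widen, a load
  /// of a[N-i] is CM_Widen_Reverse, a member of an interleaved group such as
  /// a[2*i] is CM_Interleave, and an indexed access a[b[i]] is
  /// CM_GatherScatter if the target supports it, or CM_Scalarize otherwise.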
1363   enum InstWidening {
1364     CM_Unknown,
1365     CM_Widen,         // For consecutive accesses with stride +1.
1366     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1367     CM_Interleave,
1368     CM_GatherScatter,
1369     CM_Scalarize
1370   };
1371 
1372   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1373   /// instruction \p I and vector width \p VF.
1374   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1375                            InstructionCost Cost) {
1376     assert(VF.isVector() && "Expected VF >=2");
1377     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1378   }
1379 
1380   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1381   /// interleaving group \p Grp and vector width \p VF.
1382   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1383                            ElementCount VF, InstWidening W,
1384                            InstructionCost Cost) {
1385     assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group.
    // But the cost will be assigned to one instruction only.
1388     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1389       if (auto *I = Grp->getMember(i)) {
1390         if (Grp->getInsertPos() == I)
1391           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1392         else
1393           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1394       }
1395     }
1396   }
1397 
1398   /// Return the cost model decision for the given instruction \p I and vector
1399   /// width \p VF. Return CM_Unknown if this instruction did not pass
1400   /// through the cost modeling.
1401   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1402     assert(VF.isVector() && "Expected VF to be a vector VF");
1403     // Cost model is not run in the VPlan-native path - return conservative
1404     // result until this changes.
1405     if (EnableVPlanNativePath)
1406       return CM_GatherScatter;
1407 
1408     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1409     auto Itr = WideningDecisions.find(InstOnVF);
1410     if (Itr == WideningDecisions.end())
1411       return CM_Unknown;
1412     return Itr->second.first;
1413   }
1414 
1415   /// Return the vectorization cost for the given instruction \p I and vector
1416   /// width \p VF.
1417   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1418     assert(VF.isVector() && "Expected VF >=2");
1419     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1420     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1421            "The cost is not calculated");
1422     return WideningDecisions[InstOnVF].second;
1423   }
1424 
1425   /// Return True if instruction \p I is an optimizable truncate whose operand
1426   /// is an induction variable. Such a truncate will be removed by adding a new
1427   /// induction variable with the destination type.
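  /// For example (illustrative): given a primary i64 induction variable %iv
  /// and a use
  ///   %t = trunc i64 %iv to i32
  /// the truncate can be removed by creating a new i32 induction variable
  /// that produces the truncated values directly.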
1428   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1429     // If the instruction is not a truncate, return false.
1430     auto *Trunc = dyn_cast<TruncInst>(I);
1431     if (!Trunc)
1432       return false;
1433 
1434     // Get the source and destination types of the truncate.
1435     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1436     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1437 
1438     // If the truncate is free for the given types, return false. Replacing a
1439     // free truncate with an induction variable would add an induction variable
1440     // update instruction to each iteration of the loop. We exclude from this
1441     // check the primary induction variable since it will need an update
1442     // instruction regardless.
1443     Value *Op = Trunc->getOperand(0);
1444     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1445       return false;
1446 
1447     // If the truncated value is not an induction variable, return false.
1448     return Legal->isInductionPhi(Op);
1449   }
1450 
1451   /// Collects the instructions to scalarize for each predicated instruction in
1452   /// the loop.
1453   void collectInstsToScalarize(ElementCount VF);
1454 
1455   /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on the CM decisions for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
1458   void collectUniformsAndScalars(ElementCount VF) {
1459     // Do the analysis once.
1460     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1461       return;
1462     setCostBasedWideningDecision(VF);
1463     collectLoopUniforms(VF);
1464     collectLoopScalars(VF);
1465   }
1466 
1467   /// Returns true if the target machine supports masked store operation
1468   /// for the given \p DataType and kind of access to \p Ptr.
1469   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1470     return Legal->isConsecutivePtr(Ptr) &&
1471            TTI.isLegalMaskedStore(DataType, Alignment);
1472   }
1473 
1474   /// Returns true if the target machine supports masked load operation
1475   /// for the given \p DataType and kind of access to \p Ptr.
1476   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1477     return Legal->isConsecutivePtr(Ptr) &&
1478            TTI.isLegalMaskedLoad(DataType, Alignment);
1479   }
1480 
1481   /// Returns true if the target machine supports masked scatter operation
1482   /// for the given \p DataType.
1483   bool isLegalMaskedScatter(Type *DataType, Align Alignment) const {
1484     return TTI.isLegalMaskedScatter(DataType, Alignment);
1485   }
1486 
1487   /// Returns true if the target machine supports masked gather operation
1488   /// for the given \p DataType.
1489   bool isLegalMaskedGather(Type *DataType, Align Alignment) const {
1490     return TTI.isLegalMaskedGather(DataType, Alignment);
1491   }
1492 
1493   /// Returns true if the target machine can represent \p V as a masked gather
1494   /// or scatter operation.
1495   bool isLegalGatherOrScatter(Value *V) {
1496     bool LI = isa<LoadInst>(V);
1497     bool SI = isa<StoreInst>(V);
1498     if (!LI && !SI)
1499       return false;
1500     auto *Ty = getMemInstValueType(V);
1501     Align Align = getLoadStoreAlignment(V);
1502     return (LI && isLegalMaskedGather(Ty, Align)) ||
1503            (SI && isLegalMaskedScatter(Ty, Align));
1504   }
1505 
1506   /// Returns true if the target machine supports all of the reduction
1507   /// variables found for the given VF.
1508   bool canVectorizeReductions(ElementCount VF) {
1509     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1510       RecurrenceDescriptor RdxDesc = Reduction.second;
1511       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1512     }));
1513   }
1514 
1515   /// Returns true if \p I is an instruction that will be scalarized with
1516   /// predication. Such instructions include conditional stores and
1517   /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if \p I will be scalarized
  /// with predication for that VF.
1520   bool
1521   isScalarWithPredication(Instruction *I,
1522                           ElementCount VF = ElementCount::getFixed(1)) const;
1523 
  /// Returns true if \p I is an instruction that will be predicated either
  /// through scalar predication or masked load/store or masked gather/scatter.
  /// Superset of instructions that return true for isScalarWithPredication.
1527   bool isPredicatedInst(Instruction *I, ElementCount VF) {
1528     if (!blockNeedsPredication(I->getParent()))
1529       return false;
1530     // Loads and stores that need some form of masked operation are predicated
1531     // instructions.
1532     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1533       return Legal->isMaskRequired(I);
1534     return isScalarWithPredication(I, VF);
1535   }
1536 
1537   /// Returns true if \p I is a memory instruction with consecutive memory
1538   /// access that can be widened.
1539   bool
1540   memoryInstructionCanBeWidened(Instruction *I,
1541                                 ElementCount VF = ElementCount::getFixed(1));
1542 
1543   /// Returns true if \p I is a memory instruction in an interleaved-group
1544   /// of memory accesses that can be vectorized with wide vector loads/stores
1545   /// and shuffles.
1546   bool
1547   interleavedAccessCanBeWidened(Instruction *I,
1548                                 ElementCount VF = ElementCount::getFixed(1));
1549 
1550   /// Check if \p Instr belongs to any interleaved access group.
1551   bool isAccessInterleaved(Instruction *Instr) {
1552     return InterleaveInfo.isInterleaved(Instr);
1553   }
1554 
1555   /// Get the interleaved access group that \p Instr belongs to.
1556   const InterleaveGroup<Instruction> *
1557   getInterleavedAccessGroup(Instruction *Instr) {
1558     return InterleaveInfo.getInterleaveGroup(Instr);
1559   }
1560 
1561   /// Returns true if we're required to use a scalar epilogue for at least
1562   /// the final iteration of the original loop.
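  /// For example (illustrative): an interleave group accessing a[3*i] and
  /// a[3*i+1] but not a[3*i+2] has a gap; the widened group access must not
  /// touch memory past the last full group, so the final iterations have to
  /// run in the scalar epilogue.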
1563   bool requiresScalarEpilogue() const {
1564     if (!isScalarEpilogueAllowed())
1565       return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
1568     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1569       return true;
1570     return InterleaveInfo.requiresScalarEpilogue();
1571   }
1572 
  /// Returns true if a scalar epilogue is allowed, i.e. it has not been
  /// disallowed due to optsize or a loop hint annotation.
1575   bool isScalarEpilogueAllowed() const {
1576     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1577   }
1578 
  /// Returns true if all loop blocks should be masked to fold the loop tail.
1580   bool foldTailByMasking() const { return FoldTailByMasking; }
1581 
  /// Returns true if the instructions in \p BB must be executed under a mask,
  /// either because the loop tail is folded by masking or because \p BB needs
  /// predication in the original loop.
  bool blockNeedsPredication(BasicBlock *BB) const {
1583     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1584   }
1585 
1586   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1587   /// nodes to the chain of instructions representing the reductions. Uses a
1588   /// MapVector to ensure deterministic iteration order.
1589   using ReductionChainMap =
1590       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1591 
1592   /// Return the chain of instructions representing an inloop reduction.
1593   const ReductionChainMap &getInLoopReductionChains() const {
1594     return InLoopReductionChains;
1595   }
1596 
1597   /// Returns true if the Phi is part of an inloop reduction.
1598   bool isInLoopReduction(PHINode *Phi) const {
1599     return InLoopReductionChains.count(Phi);
1600   }
1601 
1602   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1603   /// with factor VF.  Return the cost of the instruction, including
1604   /// scalarization overhead if it's needed.
1605   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1606 
1607   /// Estimate cost of a call instruction CI if it were vectorized with factor
1608   /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e., either no vector version is available or it is too
  /// expensive.
1612   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1613                                     bool &NeedToScalarize) const;
1614 
1615   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1616   /// that of B.
1617   bool isMoreProfitable(const VectorizationFactor &A,
1618                         const VectorizationFactor &B) const;
1619 
1620   /// Invalidates decisions already taken by the cost model.
1621   void invalidateCostModelingDecisions() {
1622     WideningDecisions.clear();
1623     Uniforms.clear();
1624     Scalars.clear();
1625   }
1626 
1627 private:
1628   unsigned NumPredStores = 0;
1629 
1630   /// \return An upper bound for the vectorization factor, a power-of-2 larger
1631   /// than zero. One is returned if vectorization should best be avoided due
1632   /// to cost.
1633   ElementCount computeFeasibleMaxVF(unsigned ConstTripCount,
1634                                     ElementCount UserVF);
1635 
  /// \return the maximized element count based on the target's vector
1637   /// registers and the loop trip-count, but limited to a maximum safe VF.
1638   /// This is a helper function of computeFeasibleMaxVF.
1639   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1640   /// issue that occurred on one of the buildbots which cannot be reproduced
  /// without having access to the proprietary compiler (see comments on
1642   /// D98509). The issue is currently under investigation and this workaround
1643   /// will be removed as soon as possible.
1644   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1645                                        unsigned SmallestType,
1646                                        unsigned WidestType,
1647                                        const ElementCount &MaxSafeVF);
1648 
1649   /// \return the maximum legal scalable VF, based on the safe max number
1650   /// of elements.
1651   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1652 
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
1660   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1661 
1662   /// Returns the expected execution cost. The unit of the cost does
1663   /// not matter because we use the 'cost' units to compare different
1664   /// vector widths. The cost that is returned is *not* normalized by
1665   /// the factor width.
1666   VectorizationCostTy expectedCost(ElementCount VF);
1667 
1668   /// Returns the execution time cost of an instruction for a given vector
1669   /// width. Vector width of one means scalar.
1670   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1671 
1672   /// The cost-computation logic from getInstructionCost which provides
1673   /// the vector type as an output parameter.
1674   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1675                                      Type *&VectorTy);
1676 
1677   /// Return the cost of instructions in an inloop reduction pattern, if I is
1678   /// part of that pattern.
1679   InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF,
1680                                           Type *VectorTy,
1681                                           TTI::TargetCostKind CostKind);
1682 
1683   /// Calculate vectorization cost of memory instruction \p I.
1684   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1685 
1686   /// The cost computation for scalarized memory instruction.
1687   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1688 
1689   /// The cost computation for interleaving group of memory instructions.
1690   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1691 
1692   /// The cost computation for Gather/Scatter instruction.
1693   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1694 
1695   /// The cost computation for widening instruction \p I with consecutive
1696   /// memory access.
1697   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1698 
1699   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1700   /// Load: scalar load + broadcast.
1701   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1702   /// element)
1703   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1704 
1705   /// Estimate the overhead of scalarizing an instruction. This is a
1706   /// convenience wrapper for the type-based getScalarizationOverhead API.
1707   InstructionCost getScalarizationOverhead(Instruction *I,
1708                                            ElementCount VF) const;
1709 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
1712   bool isConsecutiveLoadOrStore(Instruction *I);
1713 
1714   /// Returns true if an artificially high cost for emulated masked memrefs
1715   /// should be used.
1716   bool useEmulatedMaskMemRefHack(Instruction *I);
1717 
1718   /// Map of scalar integer values to the smallest bitwidth they can be legally
1719   /// represented as. The vector equivalents of these values should be truncated
1720   /// to this type.
1721   MapVector<Instruction *, uint64_t> MinBWs;
1722 
1723   /// A type representing the costs for instructions if they were to be
1724   /// scalarized rather than vectorized. The entries are Instruction-Cost
1725   /// pairs.
1726   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1727 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1730   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1731 
1732   /// Records whether it is allowed to have the original scalar loop execute at
1733   /// least once. This may be needed as a fallback loop in case runtime
1734   /// aliasing/dependence checks fail, or to handle the tail/remainder
1735   /// iterations when the trip count is unknown or doesn't divide by the VF,
1736   /// or as a peel-loop to handle gaps in interleave-groups.
1737   /// Under optsize and when the trip count is very small we don't allow any
1738   /// iterations to execute in the scalar loop.
1739   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1740 
1741   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1742   bool FoldTailByMasking = false;
1743 
1744   /// A map holding scalar costs for different vectorization factors. The
1745   /// presence of a cost for an instruction in the mapping indicates that the
1746   /// instruction will be scalarized when vectorizing with the associated
1747   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1748   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1749 
1750   /// Holds the instructions known to be uniform after vectorization.
1751   /// The data is collected per VF.
1752   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1753 
1754   /// Holds the instructions known to be scalar after vectorization.
1755   /// The data is collected per VF.
1756   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1757 
1758   /// Holds the instructions (address computations) that are forced to be
1759   /// scalarized.
1760   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1761 
  /// PHINodes of the reductions that should be expanded in-loop along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
1765   ReductionChainMap InLoopReductionChains;
1766 
1767   /// A Map of inloop reduction operations and their immediate chain operand.
1768   /// FIXME: This can be removed once reductions can be costed correctly in
1769   /// vplan. This was added to allow quick lookup to the inloop operations,
1770   /// without having to loop through InLoopReductionChains.
1771   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1772 
1773   /// Returns the expected difference in cost from scalarizing the expression
1774   /// feeding a predicated instruction \p PredInst. The instructions to
1775   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1776   /// non-negative return value implies the expression will be scalarized.
1777   /// Currently, only single-use chains are considered for scalarization.
1778   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1779                               ElementCount VF);
1780 
1781   /// Collect the instructions that are uniform after vectorization. An
1782   /// instruction is uniform if we represent it with a single scalar value in
1783   /// the vectorized loop corresponding to each vector iteration. Examples of
1784   /// uniform instructions include pointer operands of consecutive or
1785   /// interleaved memory accesses. Note that although uniformity implies an
1786   /// instruction will be scalar, the reverse is not true. In general, a
1787   /// scalarized instruction will be represented by VF scalar values in the
1788   /// vectorized loop, each corresponding to an iteration of the original
1789   /// scalar loop.
1790   void collectLoopUniforms(ElementCount VF);
1791 
1792   /// Collect the instructions that are scalar after vectorization. An
1793   /// instruction is scalar if it is known to be uniform or will be scalarized
1794   /// during vectorization. Non-uniform scalarized instructions will be
1795   /// represented by VF values in the vectorized loop, each corresponding to an
1796   /// iteration of the original scalar loop.
1797   void collectLoopScalars(ElementCount VF);
1798 
1799   /// Keeps cost model vectorization decision and cost for instructions.
1800   /// Right now it is used for memory instructions only.
1801   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1802                                 std::pair<InstWidening, InstructionCost>>;
1803 
1804   DecisionList WideningDecisions;
1805 
1806   /// Returns true if \p V is expected to be vectorized and it needs to be
1807   /// extracted.
1808   bool needsExtract(Value *V, ElementCount VF) const {
1809     Instruction *I = dyn_cast<Instruction>(V);
1810     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1811         TheLoop->isLoopInvariant(I))
1812       return false;
1813 
1814     // Assume we can vectorize V (and hence we need extraction) if the
1815     // scalars are not computed yet. This can happen, because it is called
1816     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1817     // the scalars are collected. That should be a safe assumption in most
1818     // cases, because we check if the operands have vectorizable types
1819     // beforehand in LoopVectorizationLegality.
1820     return Scalars.find(VF) == Scalars.end() ||
1821            !isScalarAfterVectorization(I, VF);
1822   };
1823 
1824   /// Returns a range containing only operands needing to be extracted.
1825   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1826                                                    ElementCount VF) const {
1827     return SmallVector<Value *, 4>(make_filter_range(
1828         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1829   }
1830 
1831   /// Determines if we have the infrastructure to vectorize loop \p L and its
1832   /// epilogue, assuming the main loop is vectorized by \p VF.
1833   bool isCandidateForEpilogueVectorization(const Loop &L,
1834                                            const ElementCount VF) const;
1835 
1836   /// Returns true if epilogue vectorization is considered profitable, and
1837   /// false otherwise.
1838   /// \p VF is the vectorization factor chosen for the original loop.
1839   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1840 
1841 public:
1842   /// The loop that we evaluate.
1843   Loop *TheLoop;
1844 
1845   /// Predicated scalar evolution analysis.
1846   PredicatedScalarEvolution &PSE;
1847 
1848   /// Loop Info analysis.
1849   LoopInfo *LI;
1850 
1851   /// Vectorization legality.
1852   LoopVectorizationLegality *Legal;
1853 
1854   /// Vector target information.
1855   const TargetTransformInfo &TTI;
1856 
1857   /// Target Library Info.
1858   const TargetLibraryInfo *TLI;
1859 
1860   /// Demanded bits analysis.
1861   DemandedBits *DB;
1862 
1863   /// Assumption cache.
1864   AssumptionCache *AC;
1865 
1866   /// Interface to emit optimization remarks.
1867   OptimizationRemarkEmitter *ORE;
1868 
  /// The function containing the loop.
  const Function *TheFunction;
1870 
1871   /// Loop Vectorize Hint.
1872   const LoopVectorizeHints *Hints;
1873 
1874   /// The interleave access information contains groups of interleaved accesses
1875   /// with the same stride and close to each other.
1876   InterleavedAccessInfo &InterleaveInfo;
1877 
1878   /// Values to ignore in the cost model.
1879   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1880 
1881   /// Values to ignore in the cost model when VF > 1.
1882   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1883 
1884   /// Profitable vector factors.
1885   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1886 };
1887 } // end namespace llvm
1888 
1889 /// Helper struct to manage generating runtime checks for vectorization.
1890 ///
/// The runtime checks are created up-front in temporary blocks to allow better
/// estimating their cost and are un-linked from the existing IR. After
/// deciding to vectorize, the checks are moved back. If deciding not to
/// vectorize, the temporary blocks are completely removed.
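///
/// After emitSCEVChecks and emitMemRuntimeChecks, the control flow looks
/// roughly like this (illustrative sketch; either check block may be absent):
///
///       preheader
///           |
///   vector.scevcheck --(check failed)--> scalar loop (bypass)
///           |
///   vector.memcheck --(may alias)------> scalar loop (bypass)
///           |
///       vector.ph --> vector.body ...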
1895 class GeneratedRTChecks {
1896   /// Basic block which contains the generated SCEV checks, if any.
1897   BasicBlock *SCEVCheckBlock = nullptr;
1898 
1899   /// The value representing the result of the generated SCEV checks. If it is
1900   /// nullptr, either no SCEV checks have been generated or they have been used.
1901   Value *SCEVCheckCond = nullptr;
1902 
1903   /// Basic block which contains the generated memory runtime checks, if any.
1904   BasicBlock *MemCheckBlock = nullptr;
1905 
1906   /// The value representing the result of the generated memory runtime checks.
1907   /// If it is nullptr, either no memory runtime checks have been generated or
1908   /// they have been used.
1909   Instruction *MemRuntimeCheckCond = nullptr;
1910 
1911   DominatorTree *DT;
1912   LoopInfo *LI;
1913 
1914   SCEVExpander SCEVExp;
1915   SCEVExpander MemCheckExp;
1916 
1917 public:
1918   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1919                     const DataLayout &DL)
1920       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1921         MemCheckExp(SE, DL, "scev.check") {}
1922 
1923   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1924   /// accurately estimate the cost of the runtime checks. The blocks are
  /// un-linked from the IR and are added back during vector code generation. If
1926   /// there is no vector code generation, the check blocks are removed
1927   /// completely.
1928   void Create(Loop *L, const LoopAccessInfo &LAI,
1929               const SCEVUnionPredicate &UnionPred) {
1930 
1931     BasicBlock *LoopHeader = L->getHeader();
1932     BasicBlock *Preheader = L->getLoopPreheader();
1933 
1934     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1935     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1936     // may be used by SCEVExpander. The blocks will be un-linked from their
1937     // predecessors and removed from LI & DT at the end of the function.
1938     if (!UnionPred.isAlwaysTrue()) {
1939       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1940                                   nullptr, "vector.scevcheck");
1941 
1942       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1943           &UnionPred, SCEVCheckBlock->getTerminator());
1944     }
1945 
1946     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1947     if (RtPtrChecking.Need) {
1948       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1949       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1950                                  "vector.memcheck");
1951 
1952       std::tie(std::ignore, MemRuntimeCheckCond) =
1953           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
1954                            RtPtrChecking.getChecks(), MemCheckExp);
1955       assert(MemRuntimeCheckCond &&
1956              "no RT checks generated although RtPtrChecking "
1957              "claimed checks are required");
1958     }
1959 
1960     if (!MemCheckBlock && !SCEVCheckBlock)
1961       return;
1962 
    // Unhook the temporary blocks with the checks; update various places
    // accordingly.
1965     if (SCEVCheckBlock)
1966       SCEVCheckBlock->replaceAllUsesWith(Preheader);
1967     if (MemCheckBlock)
1968       MemCheckBlock->replaceAllUsesWith(Preheader);
1969 
1970     if (SCEVCheckBlock) {
1971       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
1972       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
1973       Preheader->getTerminator()->eraseFromParent();
1974     }
1975     if (MemCheckBlock) {
1976       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
1977       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
1978       Preheader->getTerminator()->eraseFromParent();
1979     }
1980 
1981     DT->changeImmediateDominator(LoopHeader, Preheader);
1982     if (MemCheckBlock) {
1983       DT->eraseNode(MemCheckBlock);
1984       LI->removeBlock(MemCheckBlock);
1985     }
1986     if (SCEVCheckBlock) {
1987       DT->eraseNode(SCEVCheckBlock);
1988       LI->removeBlock(SCEVCheckBlock);
1989     }
1990   }
1991 
1992   /// Remove the created SCEV & memory runtime check blocks & instructions, if
1993   /// unused.
1994   ~GeneratedRTChecks() {
1995     SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT);
1996     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT);
1997     if (!SCEVCheckCond)
1998       SCEVCleaner.markResultUsed();
1999 
2000     if (!MemRuntimeCheckCond)
2001       MemCheckCleaner.markResultUsed();
2002 
2003     if (MemRuntimeCheckCond) {
2004       auto &SE = *MemCheckExp.getSE();
2005       // Memory runtime check generation creates compares that use expanded
2006       // values. Remove them before running the SCEVExpanderCleaners.
2007       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2008         if (MemCheckExp.isInsertedInstruction(&I))
2009           continue;
2010         SE.forgetValue(&I);
2011         SE.eraseValueFromMap(&I);
2012         I.eraseFromParent();
2013       }
2014     }
2015     MemCheckCleaner.cleanup();
2016     SCEVCleaner.cleanup();
2017 
2018     if (SCEVCheckCond)
2019       SCEVCheckBlock->eraseFromParent();
2020     if (MemRuntimeCheckCond)
2021       MemCheckBlock->eraseFromParent();
2022   }
2023 
2024   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2025   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2026   /// depending on the generated condition.
2027   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
2028                              BasicBlock *LoopVectorPreHeader,
2029                              BasicBlock *LoopExitBlock) {
2030     if (!SCEVCheckCond)
2031       return nullptr;
2032     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2033       if (C->isZero())
2034         return nullptr;
2035 
2036     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2037 
2038     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
    // Register SCEVCheckBlock with the loop enclosing the vector preheader,
    // if any.
2040     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2041       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2042 
2043     SCEVCheckBlock->getTerminator()->eraseFromParent();
2044     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2045     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2046                                                 SCEVCheckBlock);
2047 
2048     DT->addNewBlock(SCEVCheckBlock, Pred);
2049     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2050 
2051     ReplaceInstWithInst(
2052         SCEVCheckBlock->getTerminator(),
2053         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2054     // Mark the check as used, to prevent it from being removed during cleanup.
2055     SCEVCheckCond = nullptr;
2056     return SCEVCheckBlock;
2057   }
2058 
2059   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2060   /// the branches to branch to the vector preheader or \p Bypass, depending on
2061   /// the generated condition.
2062   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2063                                    BasicBlock *LoopVectorPreHeader) {
2064     // Check if we generated code that checks in runtime if arrays overlap.
2065     if (!MemRuntimeCheckCond)
2066       return nullptr;
2067 
2068     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2069     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2070                                                 MemCheckBlock);
2071 
2072     DT->addNewBlock(MemCheckBlock, Pred);
2073     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2074     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2075 
2076     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2077       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2078 
2079     ReplaceInstWithInst(
2080         MemCheckBlock->getTerminator(),
2081         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2082     MemCheckBlock->getTerminator()->setDebugLoc(
2083         Pred->getTerminator()->getDebugLoc());
2084 
2085     // Mark the check as used, to prevent it from being removed during cleanup.
2086     MemRuntimeCheckCond = nullptr;
2087     return MemCheckBlock;
2088   }
2089 };
2090 
2091 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2092 // vectorization. The loop needs to be annotated with #pragma omp simd
2093 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
2094 // vector length information is not provided, vectorization is not considered
2095 // explicit. Interleave hints are not allowed either. These limitations will be
2096 // relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
2098 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2099 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2100 // provides *explicit vectorization hints* (LV can bypass legal checks and
2101 // assume that vectorization is legal). However, both hints are implemented
2102 // using the same metadata (llvm.loop.vectorize, processed by
2103 // LoopVectorizeHints). This will be fixed in the future when the native IR
2104 // representation for pragma 'omp simd' is introduced.
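// An example of an outer loop accepted here (illustrative only):
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (int i = 0; i < N; ++i)     // annotated outer loop
//     for (int j = 0; j < M; ++j)
//       A[i][j] += B[j];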
2105 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2106                                    OptimizationRemarkEmitter *ORE) {
2107   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2108   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2109 
2110   // Only outer loops with an explicit vectorization hint are supported.
2111   // Unannotated outer loops are ignored.
2112   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2113     return false;
2114 
2115   Function *Fn = OuterLp->getHeader()->getParent();
2116   if (!Hints.allowVectorization(Fn, OuterLp,
2117                                 true /*VectorizeOnlyWhenForced*/)) {
2118     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2119     return false;
2120   }
2121 
2122   if (Hints.getInterleave() > 1) {
2123     // TODO: Interleave support is future work.
2124     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2125                          "outer loops.\n");
2126     Hints.emitRemarkWithHints();
2127     return false;
2128   }
2129 
2130   return true;
2131 }
2132 
2133 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2134                                   OptimizationRemarkEmitter *ORE,
2135                                   SmallVectorImpl<Loop *> &V) {
2136   // Collect inner loops and outer loops without irreducible control flow. For
2137   // now, only collect outer loops that have explicit vectorization hints. If we
2138   // are stress testing the VPlan H-CFG construction, we collect the outermost
2139   // loop of every loop nest.
2140   if (L.isInnermost() || VPlanBuildStressTest ||
2141       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2142     LoopBlocksRPO RPOT(&L);
2143     RPOT.perform(LI);
2144     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2145       V.push_back(&L);
2146       // TODO: Collect inner loops inside marked outer loops in case
2147       // vectorization fails for the outer loop. Do not invoke
2148       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2149       // already known to be reducible. We can use an inherited attribute for
2150       // that.
2151       return;
2152     }
2153   }
2154   for (Loop *InnerL : L)
2155     collectSupportedLoops(*InnerL, LI, ORE, V);
2156 }
2157 
2158 namespace {
2159 
2160 /// The LoopVectorize Pass.
2161 struct LoopVectorize : public FunctionPass {
2162   /// Pass identification, replacement for typeid
2163   static char ID;
2164 
2165   LoopVectorizePass Impl;
2166 
2167   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2168                          bool VectorizeOnlyWhenForced = false)
2169       : FunctionPass(ID),
2170         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2171     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2172   }
2173 
2174   bool runOnFunction(Function &F) override {
2175     if (skipFunction(F))
2176       return false;
2177 
2178     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2179     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2180     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2181     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2182     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2183     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2184     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2185     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2186     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2187     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2188     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2189     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2190     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2191 
2192     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2193         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2194 
2195     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2196                         GetLAA, *ORE, PSI).MadeAnyChange;
2197   }
2198 
2199   void getAnalysisUsage(AnalysisUsage &AU) const override {
2200     AU.addRequired<AssumptionCacheTracker>();
2201     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2202     AU.addRequired<DominatorTreeWrapperPass>();
2203     AU.addRequired<LoopInfoWrapperPass>();
2204     AU.addRequired<ScalarEvolutionWrapperPass>();
2205     AU.addRequired<TargetTransformInfoWrapperPass>();
2206     AU.addRequired<AAResultsWrapperPass>();
2207     AU.addRequired<LoopAccessLegacyAnalysis>();
2208     AU.addRequired<DemandedBitsWrapperPass>();
2209     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2210     AU.addRequired<InjectTLIMappingsLegacy>();
2211 
2212     // We currently do not preserve loopinfo/dominator analyses with outer loop
2213     // vectorization. Until this is addressed, mark these analyses as preserved
2214     // only for non-VPlan-native path.
2215     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2216     if (!EnableVPlanNativePath) {
2217       AU.addPreserved<LoopInfoWrapperPass>();
2218       AU.addPreserved<DominatorTreeWrapperPass>();
2219     }
2220 
2221     AU.addPreserved<BasicAAWrapperPass>();
2222     AU.addPreserved<GlobalsAAWrapperPass>();
2223     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2224   }
2225 };
2226 
2227 } // end anonymous namespace
2228 
2229 //===----------------------------------------------------------------------===//
2230 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2231 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2232 //===----------------------------------------------------------------------===//
2233 
2234 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2235   // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
2238   Instruction *Instr = dyn_cast<Instruction>(V);
2239   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2240                      (!Instr ||
2241                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2242   // Place the code for broadcasting invariant variables in the new preheader.
2243   IRBuilder<>::InsertPointGuard Guard(Builder);
2244   if (SafeToHoist)
2245     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2246 
2247   // Broadcast the scalar into all locations in the vector.
2248   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
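  // For VF = 4 this emits roughly (illustrative):
  //   %broadcast.splatinsert = insertelement <4 x T> undef, T %v, i32 0
  //   %broadcast.splat = shufflevector <4 x T> %broadcast.splatinsert,
  //                                    <4 x T> undef, <4 x i32> zeroinitializer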
2249 
2250   return Shuf;
2251 }
2252 
2253 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2254     const InductionDescriptor &II, Value *Step, Value *Start,
2255     Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
2256     VPTransformState &State) {
2257   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2258          "Expected either an induction phi-node or a truncate of it!");
2259 
  // Construct the initial value of the vector IV in the vector loop preheader.
2261   auto CurrIP = Builder.saveIP();
2262   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2263   if (isa<TruncInst>(EntryVal)) {
2264     assert(Start->getType()->isIntegerTy() &&
2265            "Truncation requires an integer type");
2266     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2267     Step = Builder.CreateTrunc(Step, TruncType);
2268     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2269   }
2270   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
2271   Value *SteppedStart =
2272       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
2273 
2274   // We create vector phi nodes for both integer and floating-point induction
2275   // variables. Here, we determine the kind of arithmetic we will perform.
2276   Instruction::BinaryOps AddOp;
2277   Instruction::BinaryOps MulOp;
2278   if (Step->getType()->isIntegerTy()) {
2279     AddOp = Instruction::Add;
2280     MulOp = Instruction::Mul;
2281   } else {
2282     AddOp = II.getInductionOpcode();
2283     MulOp = Instruction::FMul;
2284   }
2285 
2286   // Multiply the vectorization factor by the step using integer or
2287   // floating-point arithmetic as appropriate.
2288   Type *StepType = Step->getType();
2289   if (Step->getType()->isFloatingPointTy())
2290     StepType = IntegerType::get(StepType->getContext(),
2291                                 StepType->getScalarSizeInBits());
2292   Value *RuntimeVF = getRuntimeVF(Builder, StepType, VF);
2293   if (Step->getType()->isFloatingPointTy())
2294     RuntimeVF = Builder.CreateSIToFP(RuntimeVF, Step->getType());
2295   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
2296 
2297   // Create a vector splat to use in the induction update.
2298   //
2299   // FIXME: If the step is non-constant, we create the vector splat with
2300   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2301   //        handle a constant vector splat.
2302   Value *SplatVF = isa<Constant>(Mul)
2303                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
2304                        : Builder.CreateVectorSplat(VF, Mul);
2305   Builder.restoreIP(CurrIP);
2306 
2307   // We may need to add the step a number of times, depending on the unroll
2308   // factor. The last of those goes into the PHI.
2309   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2310                                     &*LoopVectorBody->getFirstInsertionPt());
2311   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2312   Instruction *LastInduction = VecInd;
2313   for (unsigned Part = 0; Part < UF; ++Part) {
2314     State.set(Def, LastInduction, Part);
2315 
2316     if (isa<TruncInst>(EntryVal))
2317       addMetadata(LastInduction, EntryVal);
2318     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
2319                                           State, Part);
2320 
2321     LastInduction = cast<Instruction>(
2322         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
2323     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2324   }
2325 
2326   // Move the last step to the end of the latch block. This ensures consistent
2327   // placement of all induction updates.
2328   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2329   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2330   auto *ICmp = cast<Instruction>(Br->getCondition());
2331   LastInduction->moveBefore(ICmp);
2332   LastInduction->setName("vec.ind.next");
2333 
2334   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2335   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2336 }
2337 
2338 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2339   return Cost->isScalarAfterVectorization(I, VF) ||
2340          Cost->isProfitableToScalarize(I, VF);
2341 }
2342 
2343 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2344   if (shouldScalarizeInstruction(IV))
2345     return true;
2346   auto isScalarInst = [&](User *U) -> bool {
2347     auto *I = cast<Instruction>(U);
2348     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2349   };
2350   return llvm::any_of(IV->users(), isScalarInst);
2351 }
2352 
2353 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
2354     const InductionDescriptor &ID, const Instruction *EntryVal,
2355     Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
2356     unsigned Part, unsigned Lane) {
2357   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2358          "Expected either an induction phi-node or a truncate of it!");
2359 
  // This induction variable is not the phi from the original loop but the
  // newly-created IV based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
  // re-uses the same InductionDescriptor that the original IV uses, but we
  // don't have to do any recording in this case - that is done when the
  // original IV is processed.
2366   if (isa<TruncInst>(EntryVal))
2367     return;
2368 
2369   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
2370   if (Casts.empty())
2371     return;
2372   // Only the first Cast instruction in the Casts vector is of interest.
  // The rest of the Casts (if they exist) have no uses outside the
2374   // induction update chain itself.
2375   if (Lane < UINT_MAX)
2376     State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
2377   else
2378     State.set(CastDef, VectorLoopVal, Part);
2379 }
2380 
2381 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
2382                                                 TruncInst *Trunc, VPValue *Def,
2383                                                 VPValue *CastDef,
2384                                                 VPTransformState &State) {
2385   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
2386          "Primary induction variable must have an integer type");
2387 
2388   auto II = Legal->getInductionVars().find(IV);
2389   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
2390 
2391   auto ID = II->second;
2392   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2393 
2394   // The value from the original loop to which we are mapping the new induction
2395   // variable.
2396   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2397 
2398   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2399 
  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
2402   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2403     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2404            "Induction step should be loop invariant");
2405     if (PSE.getSE()->isSCEVable(IV->getType())) {
2406       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2407       return Exp.expandCodeFor(Step, Step->getType(),
2408                                LoopVectorPreHeader->getTerminator());
2409     }
2410     return cast<SCEVUnknown>(Step)->getValue();
2411   };
2412 
2413   // The scalar value to broadcast. This is derived from the canonical
2414   // induction variable. If a truncation type is given, truncate the canonical
2415   // induction variable and step. Otherwise, derive these values from the
2416   // induction descriptor.
2417   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2418     Value *ScalarIV = Induction;
2419     if (IV != OldInduction) {
2420       ScalarIV = IV->getType()->isIntegerTy()
2421                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
2422                      : Builder.CreateCast(Instruction::SIToFP, Induction,
2423                                           IV->getType());
2424       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
2425       ScalarIV->setName("offset.idx");
2426     }
2427     if (Trunc) {
2428       auto *TruncType = cast<IntegerType>(Trunc->getType());
2429       assert(Step->getType()->isIntegerTy() &&
2430              "Truncation requires an integer step");
2431       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2432       Step = Builder.CreateTrunc(Step, TruncType);
2433     }
2434     return ScalarIV;
2435   };
2436 
  // Create the vector values from the scalar IV, for the case in which no
  // vector IV is created.
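  // E.g., for VF = 4 and UF = 2 with an integer step, part 1 becomes
  // <iv + 4*step, iv + 5*step, iv + 6*step, iv + 7*step>.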
2439   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2440     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2441     for (unsigned Part = 0; Part < UF; ++Part) {
2442       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2443       Value *EntryPart =
2444           getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
2445                         ID.getInductionOpcode());
2446       State.set(Def, EntryPart, Part);
2447       if (Trunc)
2448         addMetadata(EntryPart, Trunc);
2449       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
2450                                             State, Part);
2451     }
2452   };
2453 
2454   // Fast-math-flags propagate from the original induction instruction.
2455   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2456   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
2457     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
2458 
2459   // Now do the actual transformations, and start with creating the step value.
2460   Value *Step = CreateStepValue(ID.getStep());
2461   if (VF.isZero() || VF.isScalar()) {
2462     Value *ScalarIV = CreateScalarIV(Step);
2463     CreateSplatIV(ScalarIV, Step);
2464     return;
2465   }
2466 
2467   // Determine if we want a scalar version of the induction variable. This is
2468   // true if the induction variable itself is not widened, or if it has at
2469   // least one user in the loop that is not widened.
2470   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2471   if (!NeedsScalarIV) {
2472     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2473                                     State);
2474     return;
2475   }
2476 
2477   // Try to create a new independent vector induction variable. If we can't
2478   // create the phi node, we will splat the scalar induction variable in each
2479   // loop iteration.
2480   if (!shouldScalarizeInstruction(EntryVal)) {
2481     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2482                                     State);
2483     Value *ScalarIV = CreateScalarIV(Step);
2484     // Create scalar steps that can be used by instructions we will later
2485     // scalarize. Note that the addition of the scalar steps will not increase
2486     // the number of instructions in the loop in the common case prior to
2487     // InstCombine. We will be trading one vector extract for each scalar step.
2488     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2489     return;
2490   }
2491 
  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorized IV, except when we tail-fold; then the splat IV feeds the
  // predicate used by the masked loads/stores.
2495   Value *ScalarIV = CreateScalarIV(Step);
2496   if (!Cost->isScalarEpilogueAllowed())
2497     CreateSplatIV(ScalarIV, Step);
2498   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2499 }
2500 
2501 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
2502                                           Instruction::BinaryOps BinOp) {
2503   // Create and check the types.
2504   auto *ValVTy = cast<VectorType>(Val->getType());
2505   ElementCount VLen = ValVTy->getElementCount();
2506 
2507   Type *STy = Val->getType()->getScalarType();
2508   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2509          "Induction Step must be an integer or FP");
2510   assert(Step->getType() == STy && "Step has wrong type");
2511 
2512   SmallVector<Constant *, 8> Indices;
2513 
  // Create a vector of consecutive numbers from zero to VF - 1.
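  // E.g., for VF = 4 this is the constant vector <0, 1, 2, 3>; for scalable
  // vectors an equivalent stepvector intrinsic is emitted instead.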
2515   VectorType *InitVecValVTy = ValVTy;
2516   Type *InitVecValSTy = STy;
2517   if (STy->isFloatingPointTy()) {
2518     InitVecValSTy =
2519         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2520     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2521   }
2522   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2523 
  // Add on StartIdx.
2525   Value *StartIdxSplat = Builder.CreateVectorSplat(
2526       VLen, ConstantInt::get(InitVecValSTy, StartIdx));
2527   InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2528 
2529   if (STy->isIntegerTy()) {
2530     Step = Builder.CreateVectorSplat(VLen, Step);
2531     assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    // flags, which can be found from the original scalar operations.
2534     Step = Builder.CreateMul(InitVec, Step);
2535     return Builder.CreateAdd(Val, Step, "induction");
2536   }
2537 
2538   // Floating point induction.
2539   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2540          "Binary Opcode should be specified for FP induction");
2541   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2542   Step = Builder.CreateVectorSplat(VLen, Step);
2543   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2544   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2545 }
2546 
2547 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2548                                            Instruction *EntryVal,
2549                                            const InductionDescriptor &ID,
2550                                            VPValue *Def, VPValue *CastDef,
2551                                            VPTransformState &State) {
2552   // We shouldn't have to build scalar steps if we aren't vectorizing.
2553   assert(VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same type.
2555   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2556   assert(ScalarIVTy == Step->getType() &&
2557          "Val and Step should have the same type");
2558 
2559   // We build scalar steps for both integer and floating-point induction
2560   // variables. Here, we determine the kind of arithmetic we will perform.
2561   Instruction::BinaryOps AddOp;
2562   Instruction::BinaryOps MulOp;
2563   if (ScalarIVTy->isIntegerTy()) {
2564     AddOp = Instruction::Add;
2565     MulOp = Instruction::Mul;
2566   } else {
2567     AddOp = ID.getInductionOpcode();
2568     MulOp = Instruction::FMul;
2569   }
2570 
2571   // Determine the number of scalars we need to generate for each unroll
2572   // iteration. If EntryVal is uniform, we only need to generate the first
2573   // lane. Otherwise, we generate all VF values.
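  // E.g., for VF = 4 and UF = 2, a non-uniform EntryVal gets, for part 1,
  // the four scalars iv + (4 + Lane) * Step for Lane = 0..3.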
2574   bool IsUniform =
2575       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF);
2576   unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue();
2577   // Compute the scalar steps and save the results in State.
2578   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2579                                      ScalarIVTy->getScalarSizeInBits());
2580   Type *VecIVTy = nullptr;
2581   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2582   if (!IsUniform && VF.isScalable()) {
2583     VecIVTy = VectorType::get(ScalarIVTy, VF);
2584     UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF));
2585     SplatStep = Builder.CreateVectorSplat(VF, Step);
2586     SplatIV = Builder.CreateVectorSplat(VF, ScalarIV);
2587   }
2588 
2589   for (unsigned Part = 0; Part < UF; ++Part) {
2590     Value *StartIdx0 =
2591         createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF);
2592 
2593     if (!IsUniform && VF.isScalable()) {
2594       auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0);
2595       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2596       if (ScalarIVTy->isFloatingPointTy())
2597         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2598       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2599       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2600       State.set(Def, Add, Part);
2601       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2602                                             Part);
      // It's useful to record the lane values too, for the known minimum
      // number of elements, so we do that below. This improves the code
      // quality when trying to extract the first element, for example.
2606     }
2607 
2608     if (ScalarIVTy->isFloatingPointTy())
2609       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2610 
2611     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2612       Value *StartIdx = Builder.CreateBinOp(
2613           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2614       // The step returned by `createStepForVF` is a runtime-evaluated value
2615       // when VF is scalable. Otherwise, it should be folded into a Constant.
2616       assert((VF.isScalable() || isa<Constant>(StartIdx)) &&
2617              "Expected StartIdx to be folded to a constant when VF is not "
2618              "scalable");
2619       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2620       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2621       State.set(Def, Add, VPIteration(Part, Lane));
2622       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2623                                             Part, Lane);
2624     }
2625   }
2626 }
2627 
2628 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2629                                                     const VPIteration &Instance,
2630                                                     VPTransformState &State) {
2631   Value *ScalarInst = State.get(Def, Instance);
2632   Value *VectorValue = State.get(Def, Instance.Part);
2633   VectorValue = Builder.CreateInsertElement(
2634       VectorValue, ScalarInst,
2635       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2636   State.set(Def, VectorValue, Instance.Part);
2637 }
2638 
2639 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2640   assert(Vec->getType()->isVectorTy() && "Invalid type");
2641   return Builder.CreateVectorReverse(Vec, "reverse");
2642 }
2643 
2644 // Return whether we allow using masked interleave-groups (for dealing with
2645 // strided loads/stores that reside in predicated blocks, or for dealing
2646 // with gaps).
2647 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2648   // If an override option has been passed in for interleaved accesses, use it.
2649   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2650     return EnableMaskedInterleavedMemAccesses;
2651 
2652   return TTI.enableMaskedInterleavedAccessVectorization();
2653 }
2654 
2655 // Try to vectorize the interleave group that \p Instr belongs to.
2656 //
// E.g. Translate the following interleaved load group (factor = 3):
2658 //   for (i = 0; i < N; i+=3) {
2659 //     R = Pic[i];             // Member of index 0
2660 //     G = Pic[i+1];           // Member of index 1
2661 //     B = Pic[i+2];           // Member of index 2
2662 //     ... // do something to R, G, B
2663 //   }
2664 // To:
2665 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2666 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2667 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2668 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2669 //
// Or translate the following interleaved store group (factor = 3):
2671 //   for (i = 0; i < N; i+=3) {
2672 //     ... do something to R, G, B
2673 //     Pic[i]   = R;           // Member of index 0
2674 //     Pic[i+1] = G;           // Member of index 1
2675 //     Pic[i+2] = B;           // Member of index 2
2676 //   }
2677 // To:
2678 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2679 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2680 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2681 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2682 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2683 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2684     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2685     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2686     VPValue *BlockInMask) {
2687   Instruction *Instr = Group->getInsertPos();
2688   const DataLayout &DL = Instr->getModule()->getDataLayout();
2689 
2690   // Prepare for the vector type of the interleaved load/store.
2691   Type *ScalarTy = getMemInstValueType(Instr);
2692   unsigned InterleaveFactor = Group->getFactor();
2693   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2694   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2695 
2696   // Prepare for the new pointers.
2697   SmallVector<Value *, 2> AddrParts;
2698   unsigned Index = Group->getIndex(Instr);
2699 
2700   // TODO: extend the masked interleaved-group support to reversed access.
2701   assert((!BlockInMask || !Group->isReverse()) &&
2702          "Reversed masked interleave-group not supported.");
2703 
2704   // If the group is reverse, adjust the index to refer to the last vector lane
2705   // instead of the first. We adjust the index from the first vector lane,
2706   // rather than directly getting the pointer for lane VF - 1, because the
2707   // pointer operand of the interleaved access is supposed to be uniform. For
2708   // uniform instructions, we're only required to generate a value for the
2709   // first vector lane in each unroll iteration.
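  // E.g., for VF = 4 and an interleave factor of 3, Index grows by
  // (4 - 1) * 3 = 9 elements, so the common -Index adjustment below moves the
  // pointer back to member 0 of the lowest-addressed tuple the group accesses.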
2710   if (Group->isReverse())
2711     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2712 
2713   for (unsigned Part = 0; Part < UF; Part++) {
2714     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2715     setDebugLocFromInst(Builder, AddrPart);
2716 
    // Notice that the current instruction could be at any index of the group.
    // We need to adjust the address to that of the member at index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2728 
2729     bool InBounds = false;
2730     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2731       InBounds = gep->isInBounds();
2732     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2733     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2734 
2735     // Cast to the vector pointer type.
2736     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2737     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2738     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2739   }
2740 
2741   setDebugLocFromInst(Builder, Instr);
2742   Value *PoisonVec = PoisonValue::get(VecTy);
2743 
2744   Value *MaskForGaps = nullptr;
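  // If the group has gaps but no scalar epilogue is allowed to handle them
  // (e.g. when folding the tail), mask the gap slots off. E.g., a factor-2
  // group over VF = 4 with member 1 missing yields <1, 0, 1, 0, 1, 0, 1, 0>.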
2745   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2746     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2747     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2748   }
2749 
2750   // Vectorize the interleaved load group.
2751   if (isa<LoadInst>(Instr)) {
2752     // For each unroll part, create a wide load for the group.
2753     SmallVector<Value *, 2> NewLoads;
2754     for (unsigned Part = 0; Part < UF; Part++) {
2755       Instruction *NewLoad;
2756       if (BlockInMask || MaskForGaps) {
2757         assert(useMaskedInterleavedAccesses(*TTI) &&
2758                "masked interleaved groups are not allowed.");
2759         Value *GroupMask = MaskForGaps;
2760         if (BlockInMask) {
2761           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2762           Value *ShuffledMask = Builder.CreateShuffleVector(
2763               BlockInMaskPart,
2764               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2765               "interleaved.mask");
2766           GroupMask = MaskForGaps
2767                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2768                                                 MaskForGaps)
2769                           : ShuffledMask;
2770         }
2771         NewLoad =
2772             Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2773                                      GroupMask, PoisonVec, "wide.masked.vec");
2774       }
2775       else
2776         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2777                                             Group->getAlign(), "wide.vec");
2778       Group->addMetadata(NewLoad);
2779       NewLoads.push_back(NewLoad);
2780     }
2781 
2782     // For each member in the group, shuffle out the appropriate data from the
2783     // wide loads.
2784     unsigned J = 0;
2785     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2786       Instruction *Member = Group->getMember(I);
2787 
2788       // Skip the gaps in the group.
2789       if (!Member)
2790         continue;
2791 
2792       auto StrideMask =
2793           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2794       for (unsigned Part = 0; Part < UF; Part++) {
2795         Value *StridedVec = Builder.CreateShuffleVector(
2796             NewLoads[Part], StrideMask, "strided.vec");
2797 
        // If this member has a different type, cast the result to that type.
2799         if (Member->getType() != ScalarTy) {
2800           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2801           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2802           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2803         }
2804 
2805         if (Group->isReverse())
2806           StridedVec = reverseVector(StridedVec);
2807 
2808         State.set(VPDefs[J], StridedVec, Part);
2809       }
2810       ++J;
2811     }
2812     return;
2813   }
2814 
  // The subvector type for the current instruction.
2816   auto *SubVT = VectorType::get(ScalarTy, VF);
2817 
2818   // Vectorize the interleaved store group.
2819   for (unsigned Part = 0; Part < UF; Part++) {
2820     // Collect the stored vector from each member.
2821     SmallVector<Value *, 4> StoredVecs;
2822     for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow gaps, so each index has a
      // member.
      assert(Group->getMember(i) &&
             "Failed to get a member from an interleaved store group");
2825 
2826       Value *StoredVec = State.get(StoredValues[i], Part);
2827 
2828       if (Group->isReverse())
2829         StoredVec = reverseVector(StoredVec);
2830 
      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
        StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2835 
2836       StoredVecs.push_back(StoredVec);
2837     }
2838 
2839     // Concatenate all vectors into a wide vector.
2840     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2841 
2842     // Interleave the elements in the wide vector.
2843     Value *IVec = Builder.CreateShuffleVector(
2844         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2845         "interleaved.vec");
2846 
2847     Instruction *NewStoreInstr;
2848     if (BlockInMask) {
2849       Value *BlockInMaskPart = State.get(BlockInMask, Part);
2850       Value *ShuffledMask = Builder.CreateShuffleVector(
2851           BlockInMaskPart,
2852           createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2853           "interleaved.mask");
2854       NewStoreInstr = Builder.CreateMaskedStore(
2855           IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
2856     }
2857     else
2858       NewStoreInstr =
2859           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2860 
2861     Group->addMetadata(NewStoreInstr);
2862   }
2863 }
2864 
2865 void InnerLoopVectorizer::vectorizeMemoryInstruction(
2866     Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr,
2867     VPValue *StoredValue, VPValue *BlockInMask) {
2868   // Attempt to issue a wide load.
2869   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2870   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2871 
2872   assert((LI || SI) && "Invalid Load/Store instruction");
2873   assert((!SI || StoredValue) && "No stored value provided for widened store");
2874   assert((!LI || !StoredValue) && "Stored value provided for widened load");
2875 
2876   LoopVectorizationCostModel::InstWidening Decision =
2877       Cost->getWideningDecision(Instr, VF);
2878   assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2879           Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2880           Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2881          "CM decision is not to widen the memory instruction");
2882 
2883   Type *ScalarDataTy = getMemInstValueType(Instr);
2884 
2885   auto *DataTy = VectorType::get(ScalarDataTy, VF);
2886   const Align Alignment = getLoadStoreAlignment(Instr);
2887 
2888   // Determine if the pointer operand of the access is either consecutive or
2889   // reverse consecutive.
2890   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2891   bool ConsecutiveStride =
2892       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2893   bool CreateGatherScatter =
2894       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2895 
2896   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2897   // gather/scatter. Otherwise Decision should have been to Scalarize.
2898   assert((ConsecutiveStride || CreateGatherScatter) &&
2899          "The instruction should be scalarized");
2900   (void)ConsecutiveStride;
2901 
2902   VectorParts BlockInMaskParts(UF);
2903   bool isMaskRequired = BlockInMask;
2904   if (isMaskRequired)
2905     for (unsigned Part = 0; Part < UF; ++Part)
2906       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2907 
2908   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2909     // Calculate the pointer for the specific unroll-part.
2910     GetElementPtrInst *PartPtr = nullptr;
2911 
2912     bool InBounds = false;
2913     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2914       InBounds = gep->isInBounds();
2915     if (Reverse) {
2916       // If the address is consecutive but reversed, then the
2917       // wide store needs to start at the last vector element.
      // RunTimeVF = VScale * VF.getKnownMinValue()
      // For fixed-width vectors, VScale is 1 and RunTimeVF is just
      // VF.getKnownMinValue().
2920       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF);
2921       // NumElt = -Part * RunTimeVF
2922       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
2923       // LastLane = 1 - RunTimeVF
2924       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
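      // E.g., for fixed-width VF = 4 and Part = 1: NumElt = -4 and
      // LastLane = -3, so the two GEPs below make PartPtr point at Ptr[-7]
      // and the wide access covers Ptr[-7..-4].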
2925       PartPtr =
2926           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
2927       PartPtr->setIsInBounds(InBounds);
2928       PartPtr = cast<GetElementPtrInst>(
2929           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
2930       PartPtr->setIsInBounds(InBounds);
2931       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2932         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2933     } else {
2934       Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF);
2935       PartPtr = cast<GetElementPtrInst>(
2936           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
2937       PartPtr->setIsInBounds(InBounds);
2938     }
2939 
2940     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2941     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2942   };
2943 
2944   // Handle Stores:
2945   if (SI) {
2946     setDebugLocFromInst(Builder, SI);
2947 
2948     for (unsigned Part = 0; Part < UF; ++Part) {
2949       Instruction *NewSI = nullptr;
2950       Value *StoredVal = State.get(StoredValue, Part);
2951       if (CreateGatherScatter) {
2952         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2953         Value *VectorGep = State.get(Addr, Part);
2954         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2955                                             MaskPart);
2956       } else {
2957         if (Reverse) {
2958           // If we store to reverse consecutive memory locations, then we need
2959           // to reverse the order of elements in the stored value.
2960           StoredVal = reverseVector(StoredVal);
2961           // We don't want to update the value in the map as it might be used in
2962           // another expression. So don't call resetVectorValue(StoredVal).
2963         }
2964         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2965         if (isMaskRequired)
2966           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2967                                             BlockInMaskParts[Part]);
2968         else
2969           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2970       }
2971       addMetadata(NewSI, SI);
2972     }
2973     return;
2974   }
2975 
2976   // Handle loads.
2977   assert(LI && "Must have a load instruction");
2978   setDebugLocFromInst(Builder, LI);
2979   for (unsigned Part = 0; Part < UF; ++Part) {
2980     Value *NewLI;
2981     if (CreateGatherScatter) {
2982       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2983       Value *VectorGep = State.get(Addr, Part);
2984       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2985                                          nullptr, "wide.masked.gather");
2986       addMetadata(NewLI, LI);
2987     } else {
2988       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
2989       if (isMaskRequired)
2990         NewLI = Builder.CreateMaskedLoad(
2991             VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy),
2992             "wide.masked.load");
2993       else
2994         NewLI =
2995             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
2996 
      // Add metadata to the load, but record the reverse shuffle (if any) as
      // the value of Def below.
2998       addMetadata(NewLI, LI);
2999       if (Reverse)
3000         NewLI = reverseVector(NewLI);
3001     }
3002 
3003     State.set(Def, NewLI, Part);
3004   }
3005 }
3006 
3007 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def,
3008                                                VPUser &User,
3009                                                const VPIteration &Instance,
3010                                                bool IfPredicateInstr,
3011                                                VPTransformState &State) {
3012   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
3013 
3014   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
3015   // the first lane and part.
3016   if (isa<NoAliasScopeDeclInst>(Instr))
3017     if (!Instance.isFirstIteration())
3018       return;
3019 
3020   setDebugLocFromInst(Builder, Instr);
3021 
  // Does this instruction return a value?
3023   bool IsVoidRetTy = Instr->getType()->isVoidTy();
3024 
3025   Instruction *Cloned = Instr->clone();
3026   if (!IsVoidRetTy)
3027     Cloned->setName(Instr->getName() + ".cloned");
3028 
3029   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
3030                                Builder.GetInsertPoint());
3031   // Replace the operands of the cloned instructions with their scalar
3032   // equivalents in the new loop.
3033   for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
3034     auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
3035     auto InputInstance = Instance;
3036     if (!Operand || !OrigLoop->contains(Operand) ||
3037         (Cost->isUniformAfterVectorization(Operand, State.VF)))
3038       InputInstance.Lane = VPLane::getFirstLane();
3039     auto *NewOp = State.get(User.getOperand(op), InputInstance);
3040     Cloned->setOperand(op, NewOp);
3041   }
3042   addNewMetadata(Cloned, Instr);
3043 
3044   // Place the cloned scalar in the new loop.
3045   Builder.Insert(Cloned);
3046 
3047   State.set(Def, Cloned, Instance);
3048 
  // If we just cloned a new assumption, add it to the assumption cache.
3050   if (auto *II = dyn_cast<AssumeInst>(Cloned))
3051     AC->registerAssumption(II);
3052 
3053   // End if-block.
3054   if (IfPredicateInstr)
3055     PredicatedInstructions.push_back(Cloned);
3056 }
3057 
3058 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
3059                                                       Value *End, Value *Step,
3060                                                       Instruction *DL) {
3061   BasicBlock *Header = L->getHeader();
3062   BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible that no latch exists
  // yet. If so, use the header, as this will be a single-block loop.
3065   if (!Latch)
3066     Latch = Header;
3067 
3068   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
3069   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
3070   setDebugLocFromInst(Builder, OldInst);
3071   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
3072 
3073   Builder.SetInsertPoint(Latch->getTerminator());
3074   setDebugLocFromInst(Builder, OldInst);
3075 
3076   // Create i+1 and fill the PHINode.
3077   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
3078   Induction->addIncoming(Start, L->getLoopPreheader());
3079   Induction->addIncoming(Next, Latch);
3080   // Create the compare.
3081   Value *ICmp = Builder.CreateICmpEQ(Next, End);
3082   Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);
3083 
3084   // Now we have two terminators. Remove the old one from the block.
3085   Latch->getTerminator()->eraseFromParent();
3086 
3087   return Induction;
3088 }
3089 
3090 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3091   if (TripCount)
3092     return TripCount;
3093 
3094   assert(L && "Create Trip Count for null loop.");
3095   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3096   // Find the loop boundaries.
3097   ScalarEvolution *SE = PSE.getSE();
3098   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3099   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
3100          "Invalid loop count");
3101 
3102   Type *IdxTy = Legal->getWidestInductionType();
3103   assert(IdxTy && "No type for induction");
3104 
  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way we can get a backedge-taken count then is if the
  // induction variable was signed, and as such it will not overflow. In such
  // a case truncation is legal.
3110   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
3111       IdxTy->getPrimitiveSizeInBits())
3112     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3113   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3114 
3115   // Get the total trip count from the count by adding 1.
3116   const SCEV *ExitCount = SE->getAddExpr(
3117       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3118 
3119   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3120 
3121   // Expand the trip count and place the new instructions in the preheader.
3122   // Notice that the pre-header does not change, only the loop body.
3123   SCEVExpander Exp(*SE, DL, "induction");
3124 
3125   // Count holds the overall loop count (N).
3126   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3127                                 L->getLoopPreheader()->getTerminator());
3128 
3129   if (TripCount->getType()->isPointerTy())
3130     TripCount =
3131         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3132                                     L->getLoopPreheader()->getTerminator());
3133 
3134   return TripCount;
3135 }
3136 
3137 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3138   if (VectorTripCount)
3139     return VectorTripCount;
3140 
3141   Value *TC = getOrCreateTripCount(L);
3142   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3143 
3144   Type *Ty = TC->getType();
3145   // This is where we can make the step a runtime constant.
3146   Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF);
3147 
3148   // If the tail is to be folded by masking, round the number of iterations N
3149   // up to a multiple of Step instead of rounding down. This is done by first
3150   // adding Step-1 and then rounding down. Note that it's ok if this addition
3151   // overflows: the vector induction variable will eventually wrap to zero given
3152   // that it starts at zero and its Step is a power of two; the loop will then
3153   // exit, with the last early-exit vector comparison also producing all-true.
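  // E.g., for N = 10, VF = 4 and UF = 2: Step = 8 and N is rounded up to 17,
  // so the vector trip count computed below becomes 16 and the masked vector
  // loop covers all 10 original iterations in two vector iterations.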
3154   if (Cost->foldTailByMasking()) {
3155     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3156            "VF*UF must be a power of 2 when folding tail by masking");
3157     assert(!VF.isScalable() &&
3158            "Tail folding not yet supported for scalable vectors");
3159     TC = Builder.CreateAdd(
3160         TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
3161   }
3162 
3163   // Now we need to generate the expression for the part of the loop that the
3164   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3165   // iterations are not required for correctness, or N - Step, otherwise. Step
3166   // is equal to the vectorization factor (number of SIMD elements) times the
3167   // unroll factor (number of SIMD instructions).
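  // E.g., without tail folding, N = 21 with VF = 4 and UF = 2 gives Step = 8
  // and R = 5, so the vector trip count is 16 and 5 iterations are left for
  // the scalar remainder loop.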
3168   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3169 
3170   // There are two cases where we need to ensure (at least) the last iteration
3171   // runs in the scalar remainder loop. Thus, if the step evenly divides
3172   // the trip count, we set the remainder to be equal to the step. If the step
3173   // does not evenly divide the trip count, no adjustment is necessary since
3174   // there will already be scalar iterations. Note that the minimum iterations
3175   // check ensures that N >= Step. The cases are:
3176   // 1) If there is a non-reversed interleaved group that may speculatively
3177   //    access memory out-of-bounds.
3178   // 2) If any instruction may follow a conditionally taken exit. That is, if
3179   //    the loop contains multiple exiting blocks, or a single exiting block
3180   //    which is not the latch.
3181   if (VF.isVector() && Cost->requiresScalarEpilogue()) {
3182     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3183     R = Builder.CreateSelect(IsZero, Step, R);
3184   }
3185 
3186   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3187 
3188   return VectorTripCount;
3189 }
3190 
3191 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3192                                                    const DataLayout &DL) {
  // Verify that V is a vector type with the same number of elements as DstVTy.
  auto *DstFVTy = cast<FixedVectorType>(DstVTy);
  unsigned VF = DstFVTy->getNumElements();
  auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
3198   Type *SrcElemTy = SrcVecTy->getElementType();
3199   Type *DstElemTy = DstFVTy->getElementType();
3200   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3201          "Vector elements must have same size");
3202 
3203   // Do a direct cast if element types are castable.
3204   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3205     return Builder.CreateBitOrPointerCast(V, DstFVTy);
3206   }
  // V cannot be directly cast to the desired vector type.
  // This may happen when V is a floating-point vector but DstVTy is a vector
  // of pointers, or vice versa. Handle this using a two-step bitcast with an
  // intermediate integer type for the bitcast, i.e. Ptr <-> Int <-> Float.
3211   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3212          "Only one type should be a pointer type");
3213   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3214          "Only one type should be a floating point type");
3215   Type *IntTy =
3216       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3217   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3218   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3219   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3220 }
3221 
3222 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3223                                                          BasicBlock *Bypass) {
3224   Value *Count = getOrCreateTripCount(L);
  // Reuse existing vector loop preheader for TC checks.
  // Note that a new preheader block is generated for the vector loop.
3227   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3228   IRBuilder<> Builder(TCCheckBlock->getTerminator());
3229 
  // Generate code to check if the loop's trip count is less than VF * UF, or
  // equal to it in case a scalar epilogue is required; this implies that the
  // vector trip count is zero. This check also covers the case where adding
  // one to the backedge-taken count overflowed, leading to an incorrect trip
  // count of zero. In this case we will also jump to the scalar loop.
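  // E.g., with VF = 4 and UF = 2 we branch to the scalar loop if Count < 8,
  // or if Count <= 8 when a scalar epilogue is required.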
3235   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
3236                                           : ICmpInst::ICMP_ULT;
3237 
3238   // If tail is to be folded, vector loop takes care of all iterations.
3239   Value *CheckMinIters = Builder.getFalse();
3240   if (!Cost->foldTailByMasking()) {
3241     Value *Step =
3242         createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF);
3243     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3244   }
3245   // Create new preheader for vector loop.
3246   LoopVectorPreHeader =
3247       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3248                  "vector.ph");
3249 
3250   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3251                                DT->getNode(Bypass)->getIDom()) &&
3252          "TC check is expected to dominate Bypass");
3253 
3254   // Update dominator for Bypass & LoopExit.
3255   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3256   DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3257 
3258   ReplaceInstWithInst(
3259       TCCheckBlock->getTerminator(),
3260       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3261   LoopBypassBlocks.push_back(TCCheckBlock);
3262 }
3263 
BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *const SCEVCheckBlock =
3267       RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
3268   if (!SCEVCheckBlock)
3269     return nullptr;
3270 
  assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
           (OptForSizeBasedOnProfile &&
            Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
         "Cannot SCEV check stride or overflow when optimizing for size");

3277   // Update dominator only if this is first RT check.
3278   if (LoopBypassBlocks.empty()) {
3279     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3280     DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3281   }
3282 
3283   LoopBypassBlocks.push_back(SCEVCheckBlock);
3284   AddedSafetyChecks = true;
3285   return SCEVCheckBlock;
3286 }
3287 
3288 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
3289                                                       BasicBlock *Bypass) {
3290   // VPlan-native path does not do any analysis for runtime checks currently.
3291   if (EnableVPlanNativePath)
3292     return nullptr;
3293 
3294   BasicBlock *const MemCheckBlock =
3295       RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3296 
  // Check if we generated code that checks at runtime whether arrays overlap.
  // We put the checks into a separate block to make the more common case of
  // few elements faster.
3300   if (!MemCheckBlock)
3301     return nullptr;
3302 
3303   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3304     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3305            "Cannot emit memory checks when optimizing for size, unless forced "
3306            "to vectorize.");
3307     ORE->emit([&]() {
3308       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3309                                         L->getStartLoc(), L->getHeader())
3310              << "Code-size may be reduced by not forcing "
3311                 "vectorization, or by source-code modifications "
3312                 "eliminating the need for runtime checks "
3313                 "(e.g., adding 'restrict').";
3314     });
3315   }
3316 
3317   LoopBypassBlocks.push_back(MemCheckBlock);
3318 
3319   AddedSafetyChecks = true;
3320 
3321   // We currently don't use LoopVersioning for the actual loop cloning but we
3322   // still use it to add the noalias metadata.
3323   LVer = std::make_unique<LoopVersioning>(
3324       *Legal->getLAI(),
3325       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3326       DT, PSE.getSE());
3327   LVer->prepareNoAliasMetadata();
3328   return MemCheckBlock;
3329 }
3330 
3331 Value *InnerLoopVectorizer::emitTransformedIndex(
3332     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3333     const InductionDescriptor &ID) const {
3334 
3335   SCEVExpander Exp(*SE, DL, "induction");
3336   auto Step = ID.getStep();
3337   auto StartValue = ID.getStartValue();
3338   assert(Index->getType() == Step->getType() &&
3339          "Index type does not match StepValue type");
3340 
  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and
  // rely on InstCombine for future simplifications. Here we handle only some
  // trivial cases.
3347   auto CreateAdd = [&B](Value *X, Value *Y) {
3348     assert(X->getType() == Y->getType() && "Types don't match!");
3349     if (auto *CX = dyn_cast<ConstantInt>(X))
3350       if (CX->isZero())
3351         return Y;
3352     if (auto *CY = dyn_cast<ConstantInt>(Y))
3353       if (CY->isZero())
3354         return X;
3355     return B.CreateAdd(X, Y);
3356   };
3357 
3358   auto CreateMul = [&B](Value *X, Value *Y) {
3359     assert(X->getType() == Y->getType() && "Types don't match!");
3360     if (auto *CX = dyn_cast<ConstantInt>(X))
3361       if (CX->isOne())
3362         return Y;
3363     if (auto *CY = dyn_cast<ConstantInt>(Y))
3364       if (CY->isOne())
3365         return X;
3366     return B.CreateMul(X, Y);
3367   };
3368 
3369   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3370   // loop, choose the end of the vector loop header (=LoopVectorBody), because
3371   // the DomTree is not kept up-to-date for additional blocks generated in the
3372   // vector loop. By using the header as insertion point, we guarantee that the
3373   // expanded instructions dominate all their uses.
3374   auto GetInsertPoint = [this, &B]() {
3375     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3376     if (InsertBB != LoopVectorBody &&
3377         LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
3378       return LoopVectorBody->getTerminator();
3379     return &*B.GetInsertPoint();
3380   };
3381 
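  // In all cases below, the transformed index is essentially
  // StartValue + Index * Step (with FSub instead of FAdd for decreasing FP
  // inductions), modulo the trivial folding done by the helpers above.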
3382   switch (ID.getKind()) {
3383   case InductionDescriptor::IK_IntInduction: {
3384     assert(Index->getType() == StartValue->getType() &&
3385            "Index type does not match StartValue type");
3386     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3387       return B.CreateSub(StartValue, Index);
3388     auto *Offset = CreateMul(
3389         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3390     return CreateAdd(StartValue, Offset);
3391   }
3392   case InductionDescriptor::IK_PtrInduction: {
3393     assert(isa<SCEVConstant>(Step) &&
3394            "Expected constant step for pointer induction");
3395     return B.CreateGEP(
3396         StartValue->getType()->getPointerElementType(), StartValue,
3397         CreateMul(Index,
3398                   Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())));
3399   }
3400   case InductionDescriptor::IK_FpInduction: {
3401     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3402     auto InductionBinOp = ID.getInductionBinOp();
3403     assert(InductionBinOp &&
3404            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3405             InductionBinOp->getOpcode() == Instruction::FSub) &&
3406            "Original bin op should be defined for FP induction");
3407 
3408     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3409     Value *MulExp = B.CreateFMul(StepValue, Index);
3410     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3411                          "induction");
3412   }
3413   case InductionDescriptor::IK_NoInduction:
3414     return nullptr;
3415   }
3416   llvm_unreachable("invalid enum");
3417 }
3418 
3419 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3420   LoopScalarBody = OrigLoop->getHeader();
3421   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3422   LoopExitBlock = OrigLoop->getUniqueExitBlock();
3423   assert(LoopExitBlock && "Must have an exit block");
3424   assert(LoopVectorPreHeader && "Invalid loop structure");
3425 
3426   LoopMiddleBlock =
3427       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3428                  LI, nullptr, Twine(Prefix) + "middle.block");
3429   LoopScalarPreHeader =
3430       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3431                  nullptr, Twine(Prefix) + "scalar.ph");
3432 
3433   // Set up branch from middle block to the exit and scalar preheader blocks.
3434   // completeLoopSkeleton will update the condition to use an iteration check,
3435   // if required to decide whether to execute the remainder.
3436   BranchInst *BrInst =
3437       BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue());
3438   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3439   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3440   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3441 
  // We intentionally don't let SplitBlock update LoopInfo, since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
3445   LoopVectorBody =
3446       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3447                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3448 
3449   // Update dominator for loop exit.
3450   DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3451 
3452   // Create and register the new vector loop.
3453   Loop *Lp = LI->AllocateLoop();
3454   Loop *ParentLoop = OrigLoop->getParentLoop();
3455 
3456   // Insert the new loop into the loop nest and register the new basic blocks
3457   // before calling any utilities such as SCEV that require valid LoopInfo.
3458   if (ParentLoop) {
3459     ParentLoop->addChildLoop(Lp);
3460   } else {
3461     LI->addTopLevelLoop(Lp);
3462   }
3463   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3464   return Lp;
3465 }
3466 
3467 void InnerLoopVectorizer::createInductionResumeValues(
3468     Loop *L, Value *VectorTripCount,
3469     std::pair<BasicBlock *, Value *> AdditionalBypass) {
3470   assert(VectorTripCount && L && "Expected valid arguments");
3471   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3472           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3473          "Inconsistent information about additional bypass.");
3474   // We are going to resume the execution of the scalar loop.
3475   // Go over all of the induction variables that we found and fix the
3476   // PHIs that are left in the scalar version of the loop.
3477   // The starting values of PHI nodes depend on the counter of the last
3478   // iteration in the vectorized loop.
3479   // If we come from a bypass edge then we need to start from the original
3480   // start value.
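  // E.g., for the primary induction (which starts at zero and steps by one),
  // the resume value coming in from the middle block is simply the vector
  // trip count.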
3481   for (auto &InductionEntry : Legal->getInductionVars()) {
3482     PHINode *OrigPhi = InductionEntry.first;
3483     InductionDescriptor II = InductionEntry.second;
3484 
    // Create phi nodes to merge from the backedge-taken check block.
3486     PHINode *BCResumeVal =
3487         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3488                         LoopScalarPreHeader->getTerminator());
3489     // Copy original phi DL over to the new one.
3490     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3491     Value *&EndValue = IVEndValues[OrigPhi];
3492     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3493     if (OrigPhi == OldInduction) {
3494       // We know what the end value is.
3495       EndValue = VectorTripCount;
3496     } else {
3497       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3498 
3499       // Fast-math-flags propagate from the original induction instruction.
3500       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3501         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3502 
3503       Type *StepType = II.getStep()->getType();
3504       Instruction::CastOps CastOp =
3505           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3506       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3507       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3508       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3509       EndValue->setName("ind.end");
3510 
3511       // Compute the end value for the additional bypass (if applicable).
3512       if (AdditionalBypass.first) {
3513         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3514         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3515                                          StepType, true);
3516         CRD =
3517             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3518         EndValueFromAdditionalBypass =
3519             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3520         EndValueFromAdditionalBypass->setName("ind.end");
3521       }
3522     }
3523     // The new PHI merges the original incoming value, in case of a bypass,
3524     // or the value at the end of the vectorized loop.
3525     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3526 
3527     // Fix the scalar body counter (PHI node).
3528     // The old induction's phi node in the scalar body needs the truncated
3529     // value.
3530     for (BasicBlock *BB : LoopBypassBlocks)
3531       BCResumeVal->addIncoming(II.getStartValue(), BB);
3532 
3533     if (AdditionalBypass.first)
3534       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3535                                             EndValueFromAdditionalBypass);
3536 
3537     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3538   }
3539 }
3540 
3541 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3542                                                       MDNode *OrigLoopID) {
3543   assert(L && "Expected valid loop.");
3544 
3545   // The trip counts should be cached by now.
3546   Value *Count = getOrCreateTripCount(L);
3547   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3548 
3549   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3550 
3551   // Add a check in the middle block to see if we have completed
3552   // all of the iterations in the first vector loop.
3553   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3554   // If tail is to be folded, we know we don't need to run the remainder.
3555   if (!Cost->foldTailByMasking()) {
3556     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3557                                         Count, VectorTripCount, "cmp.n",
3558                                         LoopMiddleBlock->getTerminator());
3559 
3560     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3561     // of the corresponding compare because they may have ended up with
3562     // different line numbers and we want to avoid awkward line stepping while
    // debugging, e.g. if the compare got a line number inside the loop.
3564     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3565     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3566   }
3567 
3568   // Get ready to start creating new instructions into the vectorized body.
3569   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3570          "Inconsistent vector loop preheader");
3571   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3572 
3573   Optional<MDNode *> VectorizedLoopID =
3574       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3575                                       LLVMLoopVectorizeFollowupVectorized});
3576   if (VectorizedLoopID.hasValue()) {
3577     L->setLoopID(VectorizedLoopID.getValue());
3578 
3579     // Do not setAlreadyVectorized if loop attributes have been defined
3580     // explicitly.
3581     return LoopVectorPreHeader;
3582   }
3583 
3584   // Keep all loop hints from the original loop on the vector loop (we'll
3585   // replace the vectorizer-specific hints below).
3586   if (MDNode *LID = OrigLoop->getLoopID())
3587     L->setLoopID(LID);
3588 
3589   LoopVectorizeHints Hints(L, true, *ORE);
3590   Hints.setAlreadyVectorized();
3591 
3592 #ifdef EXPENSIVE_CHECKS
3593   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3594   LI->verify(*DT);
3595 #endif
3596 
3597   return LoopVectorPreHeader;
3598 }
3599 
3600 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3601   /*
3602    In this function we generate a new loop. The new loop will contain
3603    the vectorized instructions while the old loop will continue to run the
3604    scalar remainder.
3605 
3606        [ ] <-- loop iteration number check.
3607     /   |
3608    /    v
3609   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3610   |  /  |
3611   | /   v
3612   ||   [ ]     <-- vector pre header.
3613   |/    |
3614   |     v
3615   |    [  ] \
3616   |    [  ]_|   <-- vector loop.
3617   |     |
3618   |     v
3619   |   -[ ]   <--- middle-block.
3620   |  /  |
3621   | /   v
3622   -|- >[ ]     <--- new preheader.
3623    |    |
3624    |    v
3625    |   [ ] \
3626    |   [ ]_|   <-- old scalar loop to handle remainder.
3627     \   |
3628      \  v
3629       >[ ]     <-- exit block.
3630    ...
3631    */
3632 
3633   // Get the metadata of the original loop before it gets modified.
3634   MDNode *OrigLoopID = OrigLoop->getLoopID();
3635 
3636   // Workaround!  Compute the trip count of the original loop and cache it
3637   // before we start modifying the CFG.  This code has a systemic problem
3638   // wherein it tries to run analysis over partially constructed IR; this is
3639   // wrong, and not simply for SCEV.  The trip count of the original loop
3640   // simply happens to be prone to hitting this in practice.  In theory, we
3641   // can hit the same issue for any SCEV, or ValueTracking query done during
3642   // mutation.  See PR49900.
3643   getOrCreateTripCount(OrigLoop);
3644 
3645   // Create an empty vector loop, and prepare basic blocks for the runtime
3646   // checks.
3647   Loop *Lp = createVectorLoopSkeleton("");
3648 
  // Now, compare the new count to zero. If it is zero, skip the vector loop
  // and jump to the scalar loop. This check also covers the case where the
3651   // backedge-taken count is uint##_max: adding one to it will overflow leading
3652   // to an incorrect trip count of zero. In this (rare) case we will also jump
3653   // to the scalar loop.
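  // For instance, with an i8 counter whose backedge-taken count is 255
  // (uint8_max), adding one wraps around to a trip count of 0, and the
  // check below correctly branches to the scalar loop.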
3654   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3655 
3656   // Generate the code to check any assumptions that we've made for SCEV
3657   // expressions.
3658   emitSCEVChecks(Lp, LoopScalarPreHeader);
3659 
3660   // Generate the code that checks in runtime if arrays overlap. We put the
3661   // checks into a separate block to make the more common case of few elements
3662   // faster.
3663   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3664 
3665   // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators, which often have multiple pointer
3667   // induction variables. In the code below we also support a case where we
3668   // don't have a single induction variable.
3669   //
  // We try as hard as possible to obtain an induction variable from the
  // original loop. However, if we don't find one that:
3672   //   - is an integer
3673   //   - counts from zero, stepping by one
3674   //   - is the size of the widest induction variable type
3675   // then we create a new one.
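  // For example, a canonical counter like 'for (i64 i = 0; i < n; ++i)'
  // typically qualifies, whereas a loop driven only by pointer inductions
  // does not, and a fresh counter is created for it.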
3676   OldInduction = Legal->getPrimaryInduction();
3677   Type *IdxTy = Legal->getWidestInductionType();
3678   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3679   // The loop step is equal to the vectorization factor (num of SIMD elements)
3680   // times the unroll factor (num of SIMD instructions).
3681   Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
3682   Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF);
3683   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3684   Induction =
3685       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3686                               getDebugLocFromInstOrOperands(OldInduction));
3687 
3688   // Emit phis for the new starting index of the scalar loop.
3689   createInductionResumeValues(Lp, CountRoundDown);
3690 
3691   return completeLoopSkeleton(Lp, OrigLoopID);
3692 }
3693 
3694 // Fix up external users of the induction variable. At this point, we are
3695 // in LCSSA form, with all external PHIs that use the IV having one input value,
3696 // coming from the remainder loop. We need those PHIs to also have a correct
3697 // value for the IV when arriving directly from the middle block.
3698 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3699                                        const InductionDescriptor &II,
3700                                        Value *CountRoundDown, Value *EndValue,
3701                                        BasicBlock *MiddleBlock) {
3702   // There are two kinds of external IV usages - those that use the value
3703   // computed in the last iteration (the PHI) and those that use the penultimate
3704   // value (the value that feeds into the phi from the loop latch).
3705   // We allow both, but they, obviously, have different values.
3706 
3707   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3708 
3709   DenseMap<Value *, Value *> MissingVals;
3710 
3711   // An external user of the last iteration's value should see the value that
3712   // the remainder loop uses to initialize its own IV.
3713   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3714   for (User *U : PostInc->users()) {
3715     Instruction *UI = cast<Instruction>(U);
3716     if (!OrigLoop->contains(UI)) {
3717       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3718       MissingVals[UI] = EndValue;
3719     }
3720   }
3721 
  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is, Start + (Step * (CRD - 1)).
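  // For example, an IV with Start = 0 and Step = 2 running CRD = 8 vector
  // trip-count iterations has EndValue = 16, while an external user of the
  // phi itself must see 0 + 2 * (8 - 1) = 14.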
3725   for (User *U : OrigPhi->users()) {
3726     auto *UI = cast<Instruction>(U);
3727     if (!OrigLoop->contains(UI)) {
3728       const DataLayout &DL =
3729           OrigLoop->getHeader()->getModule()->getDataLayout();
3730       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3731 
3732       IRBuilder<> B(MiddleBlock->getTerminator());
3733 
3734       // Fast-math-flags propagate from the original induction instruction.
3735       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3736         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3737 
3738       Value *CountMinusOne = B.CreateSub(
3739           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3740       Value *CMO =
3741           !II.getStep()->getType()->isIntegerTy()
3742               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3743                              II.getStep()->getType())
3744               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3745       CMO->setName("cast.cmo");
3746       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3747       Escape->setName("ind.escape");
3748       MissingVals[UI] = Escape;
3749     }
3750   }
3751 
3752   for (auto &I : MissingVals) {
3753     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3755     // that is %IV2 = phi [...], [ %IV1, %latch ]
3756     // In this case, if IV1 has an external use, we need to avoid adding both
3757     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3758     // don't already have an incoming value for the middle block.
3759     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3760       PHI->addIncoming(I.second, MiddleBlock);
3761   }
3762 }
3763 
3764 namespace {
3765 
3766 struct CSEDenseMapInfo {
3767   static bool canHandle(const Instruction *I) {
3768     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3769            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3770   }
3771 
3772   static inline Instruction *getEmptyKey() {
3773     return DenseMapInfo<Instruction *>::getEmptyKey();
3774   }
3775 
3776   static inline Instruction *getTombstoneKey() {
3777     return DenseMapInfo<Instruction *>::getTombstoneKey();
3778   }
3779 
3780   static unsigned getHashValue(const Instruction *I) {
3781     assert(canHandle(I) && "Unknown instruction!");
3782     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3783                                                            I->value_op_end()));
3784   }
3785 
3786   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3787     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3788         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3789       return LHS == RHS;
3790     return LHS->isIdenticalTo(RHS);
3791   }
3792 };
3793 
3794 } // end anonymous namespace
3795 
/// Perform CSE of induction variable instructions.
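/// For example, identical 'extractelement' instructions emitted for
/// different unroll parts hash to the same key below, and later copies are
/// replaced by the first occurrence.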
3797 static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3799   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3800   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3801     Instruction *In = &*I++;
3802 
3803     if (!CSEDenseMapInfo::canHandle(In))
3804       continue;
3805 
3806     // Check if we can replace this instruction with any of the
3807     // visited instructions.
3808     if (Instruction *V = CSEMap.lookup(In)) {
3809       In->replaceAllUsesWith(V);
3810       In->eraseFromParent();
3811       continue;
3812     }
3813 
3814     CSEMap[In] = In;
3815   }
3816 }
3817 
3818 InstructionCost
3819 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3820                                               bool &NeedToScalarize) const {
3821   Function *F = CI->getCalledFunction();
3822   Type *ScalarRetTy = CI->getType();
3823   SmallVector<Type *, 4> Tys, ScalarTys;
3824   for (auto &ArgOp : CI->arg_operands())
3825     ScalarTys.push_back(ArgOp->getType());
3826 
3827   // Estimate cost of scalarized vector call. The source operands are assumed
3828   // to be vectors, so we need to extract individual elements from there,
3829   // execute VF scalar calls, and then gather the result into the vector return
3830   // value.
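  // As a rough illustration, for VF = 4 the scalarized estimate is
  // 4 * ScalarCallCost plus the packing/unpacking overhead (extracts for
  // each vector argument and inserts for the return value) computed by
  // getScalarizationOverhead below.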
3831   InstructionCost ScalarCallCost =
3832       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3833   if (VF.isScalar())
3834     return ScalarCallCost;
3835 
3836   // Compute corresponding vector type for return value and arguments.
3837   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3838   for (Type *ScalarTy : ScalarTys)
3839     Tys.push_back(ToVectorTy(ScalarTy, VF));
3840 
3841   // Compute costs of unpacking argument values for the scalar calls and
3842   // packing the return values to a vector.
3843   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3844 
3845   InstructionCost Cost =
3846       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3847 
3848   // If we can't emit a vector call for this function, then the currently found
3849   // cost is the cost we need to return.
3850   NeedToScalarize = true;
3851   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3852   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3853 
3854   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3855     return Cost;
3856 
3857   // If the corresponding vector cost is cheaper, return its cost.
3858   InstructionCost VectorCallCost =
3859       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3860   if (VectorCallCost < Cost) {
3861     NeedToScalarize = false;
3862     Cost = VectorCallCost;
3863   }
3864   return Cost;
3865 }
3866 
3867 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3868   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3869     return Elt;
3870   return VectorType::get(Elt, VF);
3871 }
3872 
3873 InstructionCost
3874 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3875                                                    ElementCount VF) const {
3876   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3877   assert(ID && "Expected intrinsic call!");
3878   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3879   FastMathFlags FMF;
3880   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3881     FMF = FPMO->getFastMathFlags();
3882 
3883   SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end());
3884   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3885   SmallVector<Type *> ParamTys;
3886   std::transform(FTy->param_begin(), FTy->param_end(),
3887                  std::back_inserter(ParamTys),
3888                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3889 
3890   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3891                                     dyn_cast<IntrinsicInst>(CI));
3892   return TTI.getIntrinsicInstrCost(CostAttrs,
3893                                    TargetTransformInfo::TCK_RecipThroughput);
3894 }
3895 
3896 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3897   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3898   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3899   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3900 }
3901 
3902 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3903   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3904   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3905   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3906 }
3907 
3908 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3909   // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I` and re-extend its result. InstCombine runs
3911   // later and will remove any ext/trunc pairs.
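  // For example, if MinBWs records that an i32 add needs only 8 bits, the
  // widened <4 x i32> add is rewritten as a trunc to <4 x i8>, an 8-bit
  // add, and a zext back to <4 x i32>; InstCombine later folds away the
  // redundant casts.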
3912   SmallPtrSet<Value *, 4> Erased;
3913   for (const auto &KV : Cost->getMinimalBitwidths()) {
3914     // If the value wasn't vectorized, we must maintain the original scalar
3915     // type. The absence of the value from State indicates that it
3916     // wasn't vectorized.
3917     VPValue *Def = State.Plan->getVPValue(KV.first);
3918     if (!State.hasAnyVectorValue(Def))
3919       continue;
3920     for (unsigned Part = 0; Part < UF; ++Part) {
3921       Value *I = State.get(Def, Part);
3922       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3923         continue;
3924       Type *OriginalTy = I->getType();
3925       Type *ScalarTruncatedTy =
3926           IntegerType::get(OriginalTy->getContext(), KV.second);
3927       auto *TruncatedTy = FixedVectorType::get(
3928           ScalarTruncatedTy,
3929           cast<FixedVectorType>(OriginalTy)->getNumElements());
3930       if (TruncatedTy == OriginalTy)
3931         continue;
3932 
3933       IRBuilder<> B(cast<Instruction>(I));
3934       auto ShrinkOperand = [&](Value *V) -> Value * {
3935         if (auto *ZI = dyn_cast<ZExtInst>(V))
3936           if (ZI->getSrcTy() == TruncatedTy)
3937             return ZI->getOperand(0);
3938         return B.CreateZExtOrTrunc(V, TruncatedTy);
3939       };
3940 
3941       // The actual instruction modification depends on the instruction type,
3942       // unfortunately.
3943       Value *NewI = nullptr;
3944       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3945         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3946                              ShrinkOperand(BO->getOperand(1)));
3947 
3948         // Any wrapping introduced by shrinking this operation shouldn't be
3949         // considered undefined behavior. So, we can't unconditionally copy
3950         // arithmetic wrapping flags to NewI.
3951         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3952       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3953         NewI =
3954             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3955                          ShrinkOperand(CI->getOperand(1)));
3956       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3957         NewI = B.CreateSelect(SI->getCondition(),
3958                               ShrinkOperand(SI->getTrueValue()),
3959                               ShrinkOperand(SI->getFalseValue()));
3960       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3961         switch (CI->getOpcode()) {
3962         default:
3963           llvm_unreachable("Unhandled cast!");
3964         case Instruction::Trunc:
3965           NewI = ShrinkOperand(CI->getOperand(0));
3966           break;
3967         case Instruction::SExt:
3968           NewI = B.CreateSExtOrTrunc(
3969               CI->getOperand(0),
3970               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3971           break;
3972         case Instruction::ZExt:
3973           NewI = B.CreateZExtOrTrunc(
3974               CI->getOperand(0),
3975               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3976           break;
3977         }
3978       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3979         auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType())
3980                              ->getNumElements();
3981         auto *O0 = B.CreateZExtOrTrunc(
3982             SI->getOperand(0),
3983             FixedVectorType::get(ScalarTruncatedTy, Elements0));
3984         auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType())
3985                              ->getNumElements();
3986         auto *O1 = B.CreateZExtOrTrunc(
3987             SI->getOperand(1),
3988             FixedVectorType::get(ScalarTruncatedTy, Elements1));
3989 
3990         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3991       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3992         // Don't do anything with the operands, just extend the result.
3993         continue;
3994       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3995         auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType())
3996                             ->getNumElements();
3997         auto *O0 = B.CreateZExtOrTrunc(
3998             IE->getOperand(0),
3999             FixedVectorType::get(ScalarTruncatedTy, Elements));
4000         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
4001         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
4002       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
4003         auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType())
4004                             ->getNumElements();
4005         auto *O0 = B.CreateZExtOrTrunc(
4006             EE->getOperand(0),
4007             FixedVectorType::get(ScalarTruncatedTy, Elements));
4008         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
4009       } else {
4010         // If we don't know what to do, be conservative and don't do anything.
4011         continue;
4012       }
4013 
4014       // Lastly, extend the result.
4015       NewI->takeName(cast<Instruction>(I));
4016       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
4017       I->replaceAllUsesWith(Res);
4018       cast<Instruction>(I)->eraseFromParent();
4019       Erased.insert(I);
4020       State.reset(Def, Res, Part);
4021     }
4022   }
4023 
4024   // We'll have created a bunch of ZExts that are now parentless. Clean up.
4025   for (const auto &KV : Cost->getMinimalBitwidths()) {
4026     // If the value wasn't vectorized, we must maintain the original scalar
4027     // type. The absence of the value from State indicates that it
4028     // wasn't vectorized.
4029     VPValue *Def = State.Plan->getVPValue(KV.first);
4030     if (!State.hasAnyVectorValue(Def))
4031       continue;
4032     for (unsigned Part = 0; Part < UF; ++Part) {
4033       Value *I = State.get(Def, Part);
4034       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
4035       if (Inst && Inst->use_empty()) {
4036         Value *NewI = Inst->getOperand(0);
4037         Inst->eraseFromParent();
4038         State.reset(Def, NewI, Part);
4039       }
4040     }
4041   }
4042 }
4043 
4044 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
4045   // Insert truncates and extends for any truncated instructions as hints to
4046   // InstCombine.
4047   if (VF.isVector())
4048     truncateToMinimalBitwidths(State);
4049 
4050   // Fix widened non-induction PHIs by setting up the PHI operands.
4051   if (OrigPHIsToFix.size()) {
4052     assert(EnableVPlanNativePath &&
4053            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
4054     fixNonInductionPHIs(State);
4055   }
4056 
4057   // At this point every instruction in the original loop is widened to a
4058   // vector form. Now we need to fix the recurrences in the loop. These PHI
4059   // nodes are currently empty because we did not want to introduce cycles.
4060   // This is the second stage of vectorizing recurrences.
4061   fixCrossIterationPHIs(State);
4062 
4063   // Forget the original basic block.
4064   PSE.getSE()->forgetLoop(OrigLoop);
4065 
4066   // Fix-up external users of the induction variables.
4067   for (auto &Entry : Legal->getInductionVars())
4068     fixupIVUsers(Entry.first, Entry.second,
4069                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4070                  IVEndValues[Entry.first], LoopMiddleBlock);
4071 
4072   fixLCSSAPHIs(State);
4073   for (Instruction *PI : PredicatedInstructions)
4074     sinkScalarOperands(&*PI);
4075 
4076   // Remove redundant induction instructions.
4077   cse(LoopVectorBody);
4078 
4079   // Set/update profile weights for the vector and remainder loops as original
4080   // loop iterations are now distributed among them. Note that original loop
4081   // represented by LoopScalarBody becomes remainder loop after vectorization.
4082   //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly roughened result, but that should be OK since
4085   // profile is not inherently precise anyway. Note also possible bypass of
4086   // vector code caused by legality checks is ignored, assigning all the weight
4087   // to the vector loop, optimistically.
4088   //
  // For scalable vectorization we can't know at compile time how many
  // iterations of the loop are handled in one vector iteration, so instead
  // assume a pessimistic vscale of '1'.
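  // For example, with VF = vscale x 4 and UF = 2 the weights assume
  // 4 * 2 = 8 original iterations per vector iteration, which
  // underestimates the true ratio whenever vscale > 1 at run time.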
4092   setProfileInfoAfterUnrolling(
4093       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4094       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4095 }
4096 
4097 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4098   // In order to support recurrences we need to be able to vectorize Phi nodes.
4099   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4100   // stage #2: We now need to fix the recurrences by adding incoming edges to
4101   // the currently empty PHI nodes. At this point every instruction in the
4102   // original loop is widened to a vector form so we can use them to construct
4103   // the incoming edges.
4104   VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
4105   for (VPRecipeBase &R : Header->phis()) {
4106     auto *PhiR = dyn_cast<VPWidenPHIRecipe>(&R);
4107     if (!PhiR)
4108       continue;
4109     auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
4110     if (PhiR->getRecurrenceDescriptor()) {
4111       fixReduction(PhiR, State);
4112     } else if (Legal->isFirstOrderRecurrence(OrigPhi))
4113       fixFirstOrderRecurrence(OrigPhi, State);
4114   }
4115 }
4116 
4117 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi,
4118                                                   VPTransformState &State) {
4119   // This is the second phase of vectorizing first-order recurrences. An
4120   // overview of the transformation is described below. Suppose we have the
4121   // following loop.
4122   //
4123   //   for (int i = 0; i < n; ++i)
4124   //     b[i] = a[i] - a[i - 1];
4125   //
4126   // There is a first-order recurrence on "a". For this loop, the shorthand
4127   // scalar IR looks like:
4128   //
4129   //   scalar.ph:
4130   //     s_init = a[-1]
4131   //     br scalar.body
4132   //
4133   //   scalar.body:
4134   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4135   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4136   //     s2 = a[i]
4137   //     b[i] = s2 - s1
4138   //     br cond, scalar.body, ...
4139   //
  // In this example, s1 is a recurrence because its value depends on the
4141   // previous iteration. In the first phase of vectorization, we created a
4142   // temporary value for s1. We now complete the vectorization and produce the
4143   // shorthand vector IR shown below (for VF = 4, UF = 1).
4144   //
4145   //   vector.ph:
4146   //     v_init = vector(..., ..., ..., a[-1])
4147   //     br vector.body
4148   //
4149   //   vector.body
4150   //     i = phi [0, vector.ph], [i+4, vector.body]
4151   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4152   //     v2 = a[i, i+1, i+2, i+3];
4153   //     v3 = vector(v1(3), v2(0, 1, 2))
4154   //     b[i, i+1, i+2, i+3] = v2 - v3
4155   //     br cond, vector.body, middle.block
4156   //
4157   //   middle.block:
4158   //     x = v2(3)
4159   //     br scalar.ph
4160   //
4161   //   scalar.ph:
4162   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4163   //     br scalar.body
4164   //
  // After the vector loop finishes execution, we extract the next value of
4166   // the recurrence (x) to use as the initial value in the scalar loop.
4167 
4168   // Get the original loop preheader and single loop latch.
4169   auto *Preheader = OrigLoop->getLoopPreheader();
4170   auto *Latch = OrigLoop->getLoopLatch();
4171 
4172   // Get the initial and previous values of the scalar recurrence.
4173   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4174   auto *Previous = Phi->getIncomingValueForBlock(Latch);
4175 
4176   auto *IdxTy = Builder.getInt32Ty();
4177   auto *One = ConstantInt::get(IdxTy, 1);
4178 
4179   // Create a vector from the initial value.
4180   auto *VectorInit = ScalarInit;
4181   if (VF.isVector()) {
4182     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4183     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4184     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4185     VectorInit = Builder.CreateInsertElement(
4186         PoisonValue::get(VectorType::get(VectorInit->getType(), VF)),
4187         VectorInit, LastIdx, "vector.recur.init");
4188   }
4189 
4190   VPValue *PhiDef = State.Plan->getVPValue(Phi);
4191   VPValue *PreviousDef = State.Plan->getVPValue(Previous);
4192   // We constructed a temporary phi node in the first phase of vectorization.
4193   // This phi node will eventually be deleted.
4194   Builder.SetInsertPoint(cast<Instruction>(State.get(PhiDef, 0)));
4195 
4196   // Create a phi node for the new recurrence. The current value will either be
4197   // the initial value inserted into a vector or loop-varying vector value.
4198   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4199   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4200 
4201   // Get the vectorized previous value of the last part UF - 1. It appears last
4202   // among all unrolled iterations, due to the order of their construction.
4203   Value *PreviousLastPart = State.get(PreviousDef, UF - 1);
4204 
4205   // Find and set the insertion point after the previous value if it is an
4206   // instruction.
4207   BasicBlock::iterator InsertPt;
4208   // Note that the previous value may have been constant-folded so it is not
4209   // guaranteed to be an instruction in the vector loop.
4210   // FIXME: Loop invariant values do not form recurrences. We should deal with
4211   //        them earlier.
4212   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
4213     InsertPt = LoopVectorBody->getFirstInsertionPt();
4214   else {
4215     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
4216     if (isa<PHINode>(PreviousLastPart))
4217       // If the previous value is a phi node, we should insert after all the phi
4218       // nodes in the block containing the PHI to avoid breaking basic block
      // verification. Note that the basic block may be different from
4220       // LoopVectorBody, in case we predicate the loop.
4221       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
4222     else
4223       InsertPt = ++PreviousInst->getIterator();
4224   }
4225   Builder.SetInsertPoint(&*InsertPt);
4226 
4227   // The vector from which to take the initial value for the current iteration
4228   // (actual or unrolled). Initially, this is the vector phi node.
4229   Value *Incoming = VecPhi;
4230 
4231   // Shuffle the current and previous vector and update the vector parts.
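  // For VF = 4 each splice below produces
  //   vector(Incoming(3), PreviousPart(0, 1, 2)),
  // i.e. a concatenate-and-extract shuffle offset by -1 element, matching
  // the v3 line in the example above.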
4232   for (unsigned Part = 0; Part < UF; ++Part) {
4233     Value *PreviousPart = State.get(PreviousDef, Part);
4234     Value *PhiPart = State.get(PhiDef, Part);
4235     auto *Shuffle = VF.isVector()
4236                         ? Builder.CreateVectorSplice(Incoming, PreviousPart, -1)
4237                         : Incoming;
4238     PhiPart->replaceAllUsesWith(Shuffle);
4239     cast<Instruction>(PhiPart)->eraseFromParent();
4240     State.reset(PhiDef, Shuffle, Part);
4241     Incoming = PreviousPart;
4242   }
4243 
4244   // Fix the latch value of the new recurrence in the vector loop.
4245   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4246 
4247   // Extract the last vector element in the middle block. This will be the
4248   // initial value for the recurrence when jumping to the scalar loop.
4249   auto *ExtractForScalar = Incoming;
4250   if (VF.isVector()) {
4251     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4252     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4253     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4254     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
4255                                                     "vector.recur.extract");
4256   }
4257   // Extract the second last element in the middle block if the
4258   // Phi is used outside the loop. We need to extract the phi itself
4259   // and not the last element (the phi update in the current iteration). This
4260   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4261   // when the scalar loop is not run at all.
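  // Continuing the VF = 4 example above: if the final vector of loads is
  // v2 = a[i, i+1, i+2, i+3], the phi's value in the last iteration is
  // v2(2), i.e. element VF - 2 of the last 'Incoming' vector.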
4262   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4263   if (VF.isVector()) {
4264     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4265     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
4266     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4267         Incoming, Idx, "vector.recur.extract.for.phi");
4268   } else if (UF > 1)
    // When the loop is unrolled without vectorizing, initialize
4270     // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value
4271     // of `Incoming`. This is analogous to the vectorized case above: extracting
4272     // the second last element when VF > 1.
4273     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4274 
4275   // Fix the initial value of the original recurrence in the scalar loop.
4276   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4277   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4278   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4279     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4280     Start->addIncoming(Incoming, BB);
4281   }
4282 
4283   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4284   Phi->setName("scalar.recur");
4285 
4286   // Finally, fix users of the recurrence outside the loop. The users will need
4287   // either the last value of the scalar recurrence or the last value of the
4288   // vector recurrence we extracted in the middle block. Since the loop is in
4289   // LCSSA form, we just need to find all the phi nodes for the original scalar
4290   // recurrence in the exit block, and then add an edge for the middle block.
4291   // Note that LCSSA does not imply single entry when the original scalar loop
4292   // had multiple exiting edges (as we always run the last iteration in the
4293   // scalar epilogue); in that case, the exiting path through middle will be
4294   // dynamically dead and the value picked for the phi doesn't matter.
4295   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4296     if (any_of(LCSSAPhi.incoming_values(),
4297                [Phi](Value *V) { return V == Phi; }))
4298       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4299 }
4300 
4301 static bool useOrderedReductions(RecurrenceDescriptor &RdxDesc) {
4302   return EnableStrictReductions && RdxDesc.isOrdered();
4303 }
4304 
4305 void InnerLoopVectorizer::fixReduction(VPWidenPHIRecipe *PhiR,
4306                                        VPTransformState &State) {
4307   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
  // Get its reduction variable descriptor.
4309   assert(Legal->isReductionVariable(OrigPhi) &&
4310          "Unable to find the reduction variable");
4311   RecurrenceDescriptor RdxDesc = *PhiR->getRecurrenceDescriptor();
4312 
4313   RecurKind RK = RdxDesc.getRecurrenceKind();
4314   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4315   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4316   setDebugLocFromInst(Builder, ReductionStartValue);
4317   bool IsInLoopReductionPhi = Cost->isInLoopReduction(OrigPhi);
4318 
4319   VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst);
4320   // This is the vector-clone of the value that leaves the loop.
4321   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4322 
4323   // Wrap flags are in general invalid after vectorization, clear them.
4324   clearReductionWrapFlags(RdxDesc, State);
4325 
4326   // Fix the vector-loop phi.
4327 
4328   // Reductions do not have to start at zero. They can start with
4329   // any loop invariant values.
4330   BasicBlock *VectorLoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4331 
4332   bool IsOrdered = State.VF.isVector() && IsInLoopReductionPhi &&
4333                    useOrderedReductions(RdxDesc);
4334 
4335   for (unsigned Part = 0; Part < UF; ++Part) {
4336     if (IsOrdered && Part > 0)
4337       break;
4338     Value *VecRdxPhi = State.get(PhiR->getVPSingleValue(), Part);
4339     Value *Val = State.get(PhiR->getBackedgeValue(), Part);
4340     if (IsOrdered)
4341       Val = State.get(PhiR->getBackedgeValue(), UF - 1);
4342 
4343     cast<PHINode>(VecRdxPhi)->addIncoming(Val, VectorLoopLatch);
4344   }
4345 
4346   // Before each round, move the insertion point right between
4347   // the PHIs and the values we are going to write.
4348   // This allows us to write both PHINodes and the extractelement
4349   // instructions.
4350   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4351 
4352   setDebugLocFromInst(Builder, LoopExitInst);
4353 
4354   Type *PhiTy = OrigPhi->getType();
4355   // If tail is folded by masking, the vector value to leave the loop should be
  // a Select choosing between the vectorized LoopExitInst and the vectorized
  // Phi, instead of the LoopExitInst alone. For an inloop reduction the
  // reduction will already be predicated, and does not need to be handled
  // here.
4359   if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) {
4360     for (unsigned Part = 0; Part < UF; ++Part) {
4361       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4362       Value *Sel = nullptr;
4363       for (User *U : VecLoopExitInst->users()) {
4364         if (isa<SelectInst>(U)) {
4365           assert(!Sel && "Reduction exit feeding two selects");
4366           Sel = U;
4367         } else
          assert(isa<PHINode>(U) && "Reduction exit must feed PHIs or selects");
4369       }
4370       assert(Sel && "Reduction exit feeds no select");
4371       State.reset(LoopExitInstDef, Sel, Part);
4372 
4373       // If the target can create a predicated operator for the reduction at no
4374       // extra cost in the loop (for example a predicated vadd), it can be
4375       // cheaper for the select to remain in the loop than be sunk out of it,
4376       // and so use the select value for the phi instead of the old
4377       // LoopExitValue.
4378       if (PreferPredicatedReductionSelect ||
4379           TTI->preferPredicatedReductionSelect(
4380               RdxDesc.getOpcode(), PhiTy,
4381               TargetTransformInfo::ReductionFlags())) {
4382         auto *VecRdxPhi =
4383             cast<PHINode>(State.get(PhiR->getVPSingleValue(), Part));
4384         VecRdxPhi->setIncomingValueForBlock(
4385             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4386       }
4387     }
4388   }
4389 
4390   // If the vector reduction can be performed in a smaller type, we truncate
4391   // then extend the loop exit value to enable InstCombine to evaluate the
4392   // entire expression in the smaller type.
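  // For example, an i32 add reduction proven to need only 8 bits is
  // truncated to <VF x i8> here, reduced in the narrow type, and extended
  // back to i32 by the final sign/zero extension further down.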
4393   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
4394     assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!");
4395     assert(!VF.isScalable() && "scalable vectors not yet supported.");
4396     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4397     Builder.SetInsertPoint(
4398         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4399     VectorParts RdxParts(UF);
4400     for (unsigned Part = 0; Part < UF; ++Part) {
4401       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4402       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4403       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4404                                         : Builder.CreateZExt(Trunc, VecTy);
4405       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
4406            UI != RdxParts[Part]->user_end();)
4407         if (*UI != Trunc) {
4408           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
4409           RdxParts[Part] = Extnd;
4410         } else {
4411           ++UI;
4412         }
4413     }
4414     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4415     for (unsigned Part = 0; Part < UF; ++Part) {
4416       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4417       State.reset(LoopExitInstDef, RdxParts[Part], Part);
4418     }
4419   }
4420 
4421   // Reduce all of the unrolled parts into a single vector.
4422   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4423   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4424 
4425   // The middle block terminator has already been assigned a DebugLoc here (the
4426   // OrigLoop's single latch terminator). We want the whole middle block to
4427   // appear to execute on this line because: (a) it is all compiler generated,
4428   // (b) these instructions are always executed after evaluating the latch
4429   // conditional branch, and (c) other passes may add new predecessors which
4430   // terminate on this line. This is the easiest way to ensure we don't
4431   // accidentally cause an extra step back into the loop while debugging.
4432   setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
4433   if (IsOrdered)
4434     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
4435   else {
4436     // Floating-point operations should have some FMF to enable the reduction.
4437     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4438     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4439     for (unsigned Part = 1; Part < UF; ++Part) {
4440       Value *RdxPart = State.get(LoopExitInstDef, Part);
4441       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4442         ReducedPartRdx = Builder.CreateBinOp(
4443             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4444       } else {
4445         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4446       }
4447     }
4448   }
4449 
4450   // Create the reduction after the loop. Note that inloop reductions create the
4451   // target reduction in the loop using a Reduction recipe.
4452   if (VF.isVector() && !IsInLoopReductionPhi) {
4453     ReducedPartRdx =
4454         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx);
4455     // If the reduction can be performed in a smaller type, we need to extend
4456     // the reduction to the wider type before we branch to the original loop.
4457     if (PhiTy != RdxDesc.getRecurrenceType())
4458       ReducedPartRdx = RdxDesc.isSigned()
4459                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4460                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4461   }
4462 
4463   // Create a phi node that merges control-flow from the backedge-taken check
4464   // block and the middle block.
4465   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4466                                         LoopScalarPreHeader->getTerminator());
4467   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4468     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4469   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4470 
4471   // Now, we need to fix the users of the reduction variable
4472   // inside and outside of the scalar remainder loop.
4473 
4474   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4475   // in the exit blocks.  See comment on analogous loop in
  // fixFirstOrderRecurrence for a more complete explanation of the logic.
4477   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4478     if (any_of(LCSSAPhi.incoming_values(),
4479                [LoopExitInst](Value *V) { return V == LoopExitInst; }))
4480       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4481 
4482   // Fix the scalar loop reduction variable with the incoming reduction sum
4483   // from the vector body and from the backedge value.
4484   int IncomingEdgeBlockIdx =
4485       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4486   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4487   // Pick the other block.
4488   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4489   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4490   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4491 }
4492 
4493 void InnerLoopVectorizer::clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc,
4494                                                   VPTransformState &State) {
4495   RecurKind RK = RdxDesc.getRecurrenceKind();
4496   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4497     return;
4498 
4499   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4500   assert(LoopExitInstr && "null loop exit instruction");
4501   SmallVector<Instruction *, 8> Worklist;
4502   SmallPtrSet<Instruction *, 8> Visited;
4503   Worklist.push_back(LoopExitInstr);
4504   Visited.insert(LoopExitInstr);
4505 
4506   while (!Worklist.empty()) {
4507     Instruction *Cur = Worklist.pop_back_val();
4508     if (isa<OverflowingBinaryOperator>(Cur))
4509       for (unsigned Part = 0; Part < UF; ++Part) {
4510         Value *V = State.get(State.Plan->getVPValue(Cur), Part);
4511         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4512       }
4513 
4514     for (User *U : Cur->users()) {
4515       Instruction *UI = cast<Instruction>(U);
4516       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4517           Visited.insert(UI).second)
4518         Worklist.push_back(UI);
4519     }
4520   }
4521 }
4522 
4523 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4524   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4525     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4526       // Some phis were already hand updated by the reduction and recurrence
4527       // code above, leave them alone.
4528       continue;
4529 
4530     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4531     // Non-instruction incoming values will have only one value.
4532 
4533     VPLane Lane = VPLane::getFirstLane();
4534     if (isa<Instruction>(IncomingValue) &&
4535         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4536                                            VF))
4537       Lane = VPLane::getLastLaneForVF(VF);
4538 
4539     // Can be a loop invariant incoming value or the last scalar value to be
4540     // extracted from the vectorized loop.
4541     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4542     Value *lastIncomingValue =
4543         OrigLoop->isLoopInvariant(IncomingValue)
4544             ? IncomingValue
4545             : State.get(State.Plan->getVPValue(IncomingValue),
4546                         VPIteration(UF - 1, Lane));
4547     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4548   }
4549 }
4550 
4551 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4552   // The basic block and loop containing the predicated instruction.
4553   auto *PredBB = PredInst->getParent();
4554   auto *VectorLoop = LI->getLoopFor(PredBB);
4555 
4556   // Initialize a worklist with the operands of the predicated instruction.
4557   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4558 
4559   // Holds instructions that we need to analyze again. An instruction may be
4560   // reanalyzed if we don't yet know if we can sink it or not.
4561   SmallVector<Instruction *, 8> InstsToReanalyze;
4562 
4563   // Returns true if a given use occurs in the predicated block. Phi nodes use
4564   // their operands in their corresponding predecessor blocks.
4565   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4566     auto *I = cast<Instruction>(U.getUser());
4567     BasicBlock *BB = I->getParent();
4568     if (auto *Phi = dyn_cast<PHINode>(I))
4569       BB = Phi->getIncomingBlock(
4570           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4571     return BB == PredBB;
4572   };
4573 
4574   // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
4576   // operands are then added to the worklist. The algorithm ends after one pass
4577   // through the worklist doesn't sink a single instruction.
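  // For example, an add used only by the predicated instruction is sunk
  // first; that may leave the add's own operands used only inside the
  // predicated block, so they can be sunk in turn.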
4578   bool Changed;
4579   do {
4580     // Add the instructions that need to be reanalyzed to the worklist, and
4581     // reset the changed indicator.
4582     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4583     InstsToReanalyze.clear();
4584     Changed = false;
4585 
4586     while (!Worklist.empty()) {
4587       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4588 
4589       // We can't sink an instruction if it is a phi node, is already in the
4590       // predicated block, is not in the loop, or may have side effects.
4591       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4592           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4593         continue;
4594 
4595       // It's legal to sink the instruction if all its uses occur in the
4596       // predicated block. Otherwise, there's nothing to do yet, and we may
4597       // need to reanalyze the instruction.
4598       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4599         InstsToReanalyze.push_back(I);
4600         continue;
4601       }
4602 
4603       // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
4605       I->moveBefore(&*PredBB->getFirstInsertionPt());
4606       Worklist.insert(I->op_begin(), I->op_end());
4607 
4608       // The sinking may have enabled other instructions to be sunk, so we will
4609       // need to iterate.
4610       Changed = true;
4611     }
4612   } while (Changed);
4613 }
4614 
4615 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4616   for (PHINode *OrigPhi : OrigPHIsToFix) {
4617     VPWidenPHIRecipe *VPPhi =
4618         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4619     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4620     // Make sure the builder has a valid insert point.
4621     Builder.SetInsertPoint(NewPhi);
4622     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4623       VPValue *Inc = VPPhi->getIncomingValue(i);
4624       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4625       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4626     }
4627   }
4628 }
4629 
4630 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef,
4631                                    VPUser &Operands, unsigned UF,
4632                                    ElementCount VF, bool IsPtrLoopInvariant,
4633                                    SmallBitVector &IsIndexLoopInvariant,
4634                                    VPTransformState &State) {
4635   // Construct a vector GEP by widening the operands of the scalar GEP as
4636   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4637   // results in a vector of pointers when at least one operand of the GEP
4638   // is vector-typed. Thus, to keep the representation compact, we only use
4639   // vector-typed operands for loop-varying values.
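  // For example, 'getelementptr %base, %iv' with a loop-invariant %base and
  // a widened induction %iv becomes a single GEP per part with a scalar
  // base and a vector index, yielding a vector of pointers.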
4640 
4641   if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4642     // If we are vectorizing, but the GEP has only loop-invariant operands,
4643     // the GEP we build (by only using vector-typed operands for
4644     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4645     // produce a vector of pointers, we need to either arbitrarily pick an
4646     // operand to broadcast, or broadcast a clone of the original GEP.
4647     // Here, we broadcast a clone of the original.
4648     //
4649     // TODO: If at some point we decide to scalarize instructions having
4650     //       loop-invariant operands, this special case will no longer be
4651     //       required. We would add the scalarization decision to
4652     //       collectLoopScalars() and teach getVectorValue() to broadcast
4653     //       the lane-zero scalar value.
4654     auto *Clone = Builder.Insert(GEP->clone());
4655     for (unsigned Part = 0; Part < UF; ++Part) {
4656       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4657       State.set(VPDef, EntryPart, Part);
4658       addMetadata(EntryPart, GEP);
4659     }
4660   } else {
4661     // If the GEP has at least one loop-varying operand, we are sure to
4662     // produce a vector of pointers. But if we are only unrolling, we want
4663     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4664     // produce with the code below will be scalar (if VF == 1) or vector
4665     // (otherwise). Note that for the unroll-only case, we still maintain
4666     // values in the vector mapping with initVector, as we do for other
4667     // instructions.
4668     for (unsigned Part = 0; Part < UF; ++Part) {
4669       // The pointer operand of the new GEP. If it's loop-invariant, we
4670       // won't broadcast it.
4671       auto *Ptr = IsPtrLoopInvariant
4672                       ? State.get(Operands.getOperand(0), VPIteration(0, 0))
4673                       : State.get(Operands.getOperand(0), Part);
4674 
4675       // Collect all the indices for the new GEP. If any index is
4676       // loop-invariant, we won't broadcast it.
4677       SmallVector<Value *, 4> Indices;
4678       for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4679         VPValue *Operand = Operands.getOperand(I);
4680         if (IsIndexLoopInvariant[I - 1])
4681           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
4682         else
4683           Indices.push_back(State.get(Operand, Part));
4684       }
4685 
4686       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4687       // but it should be a vector, otherwise.
4688       auto *NewGEP =
4689           GEP->isInBounds()
4690               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4691                                           Indices)
4692               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4693       assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
4694              "NewGEP is not a pointer vector");
4695       State.set(VPDef, NewGEP, Part);
4696       addMetadata(NewGEP, GEP);
4697     }
4698   }
4699 }
4700 
4701 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4702                                               RecurrenceDescriptor *RdxDesc,
4703                                               VPWidenPHIRecipe *PhiR,
4704                                               VPTransformState &State) {
4705   PHINode *P = cast<PHINode>(PN);
4706   if (EnableVPlanNativePath) {
4707     // Currently we enter here in the VPlan-native path for non-induction
4708     // PHIs where all control flow is uniform. We simply widen these PHIs.
4709     // Create a vector phi with no operands - the vector phi operands will be
4710     // set at the end of vector code generation.
4711     Type *VecTy = (State.VF.isScalar())
4712                       ? PN->getType()
4713                       : VectorType::get(PN->getType(), State.VF);
4714     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4715     State.set(PhiR, VecPhi, 0);
4716     OrigPHIsToFix.push_back(P);
4717 
4718     return;
4719   }
4720 
4721   assert(PN->getParent() == OrigLoop->getHeader() &&
4722          "Non-header phis should have been handled elsewhere");
4723 
4724   VPValue *StartVPV = PhiR->getStartValue();
4725   Value *StartV = StartVPV ? StartVPV->getLiveInIRValue() : nullptr;
4726   // In order to support recurrences we need to be able to vectorize Phi nodes.
4727   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4728   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4729   // this value when we vectorize all of the instructions that use the PHI.
4730   if (RdxDesc || Legal->isFirstOrderRecurrence(P)) {
4731     Value *Iden = nullptr;
4732     bool ScalarPHI =
4733         (State.VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN));
4734     Type *VecTy =
4735         ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), State.VF);
4736 
4737     if (RdxDesc) {
4738       assert(Legal->isReductionVariable(P) && StartV &&
4739              "RdxDesc should only be set for reduction variables; in that case "
4740              "a StartV is also required");
4741       RecurKind RK = RdxDesc->getRecurrenceKind();
4742       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
        // MinMax reductions have the start value as their identity.
4744         if (ScalarPHI) {
4745           Iden = StartV;
4746         } else {
4747           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4748           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4749           StartV = Iden =
4750               Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident");
4751         }
4752       } else {
4753         Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity(
4754             RK, VecTy->getScalarType(), RdxDesc->getFastMathFlags());
4755         Iden = IdenC;
4756 
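        // For example (an illustrative sketch, assuming an integer add
        // reduction with VF = 4 and scalar start value %s), the vector case
        // below splats the identity to <0, 0, 0, 0> and builds the start
        // value <%s, 0, 0, 0>, so summing all lanes of the final vector
        // reproduces the scalar result.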
4757         if (!ScalarPHI) {
4758           Iden = ConstantVector::getSplat(State.VF, IdenC);
4759           IRBuilderBase::InsertPointGuard IPBuilder(Builder);
4760           Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4761           Constant *Zero = Builder.getInt32(0);
4762           StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
4763         }
4764       }
4765     }
4766 
4767     bool IsOrdered = State.VF.isVector() &&
4768                      Cost->isInLoopReduction(cast<PHINode>(PN)) &&
4769                      useOrderedReductions(*RdxDesc);
4770 
4771     for (unsigned Part = 0; Part < State.UF; ++Part) {
4772       // This is phase one of vectorizing PHIs.
4773       if (Part > 0 && IsOrdered)
4774         return;
4775       Value *EntryPart = PHINode::Create(
4776           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4777       State.set(PhiR, EntryPart, Part);
4778       if (StartV) {
4779         // Make sure to add the reduction start value only to the
4780         // first unroll part.
4781         Value *StartVal = (Part == 0) ? StartV : Iden;
4782         cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader);
4783       }
4784     }
4785     return;
4786   }
4787 
4788   assert(!Legal->isReductionVariable(P) &&
4789          "reductions should be handled above");
4790 
4791   setDebugLocFromInst(Builder, P);
4792 
4793   // This PHINode must be an induction variable.
4794   // Make sure that we know about it.
4795   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4796 
4797   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4798   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4799 
4800   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4801   // which can be found from the original scalar operations.
4802   switch (II.getKind()) {
4803   case InductionDescriptor::IK_NoInduction:
4804     llvm_unreachable("Unknown induction");
4805   case InductionDescriptor::IK_IntInduction:
4806   case InductionDescriptor::IK_FpInduction:
4807     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4808   case InductionDescriptor::IK_PtrInduction: {
4809     // Handle the pointer induction variable case.
4810     assert(P->getType()->isPointerTy() && "Unexpected type.");
4811 
4812     if (Cost->isScalarAfterVectorization(P, State.VF)) {
4813       // This is the normalized GEP that starts counting at zero.
4814       Value *PtrInd =
4815           Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
4816       // Determine the number of scalars we need to generate for each unroll
4817       // iteration. If the instruction is uniform, we only need to generate the
4818       // first lane. Otherwise, we generate all VF values.
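      // For example (an illustrative sketch, assuming VF = 4, UF = 2, and a
      // non-uniform use), we emit eight scalar "next.gep" values, one per
      // (Part, Lane) pair, each addressing Start + (Part * 4 + Lane) * Step.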
4819       bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF);
4820       assert((IsUniform || !VF.isScalable()) &&
4821              "Currently unsupported for scalable vectors");
4822       unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();
4823 
4824       for (unsigned Part = 0; Part < UF; ++Part) {
4825         Value *PartStart = createStepForVF(
4826             Builder, ConstantInt::get(PtrInd->getType(), Part), VF);
4827         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4828           Value *Idx = Builder.CreateAdd(
4829               PartStart, ConstantInt::get(PtrInd->getType(), Lane));
4830           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4831           Value *SclrGep =
4832               emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4833           SclrGep->setName("next.gep");
4834           State.set(PhiR, SclrGep, VPIteration(Part, Lane));
4835         }
4836       }
4837       return;
4838     }
4839     assert(isa<SCEVConstant>(II.getStep()) &&
4840            "Induction step not a SCEV constant!");
4841     Type *PhiType = II.getStep()->getType();
4842 
4843     // Build a pointer phi
4844     Value *ScalarStartValue = II.getStartValue();
4845     Type *ScStValueType = ScalarStartValue->getType();
4846     PHINode *NewPointerPhi =
4847         PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
4848     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4849 
    // A pointer induction, performed by using a GEP that advances the
    // pointer by Step * VF * UF elements on each vector iteration.
4851     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4852     Instruction *InductionLoc = LoopLatch->getTerminator();
4853     const SCEV *ScalarStep = II.getStep();
4854     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4855     Value *ScalarStepValue =
4856         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4857     Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
4858     Value *NumUnrolledElems =
4859         Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
4860     Value *InductionGEP = GetElementPtrInst::Create(
4861         ScStValueType->getPointerElementType(), NewPointerPhi,
4862         Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4863         InductionLoc);
4864     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4865 
4866     // Create UF many actual address geps that use the pointer
4867     // phi as base and a vectorized version of the step value
4868     // (<step*0, ..., step*N>) as offset.
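    // For example (an illustrative sketch, assuming VF = 4), Part 1 uses the
    // offset vector <4 * step, 5 * step, 6 * step, 7 * step> relative to the
    // pointer phi.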
4869     for (unsigned Part = 0; Part < State.UF; ++Part) {
4870       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4871       Value *StartOffsetScalar =
4872           Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4873       Value *StartOffset =
4874           Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
      // Create a vector of consecutive numbers from zero to VF - 1.
4876       StartOffset =
4877           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4878 
4879       Value *GEP = Builder.CreateGEP(
4880           ScStValueType->getPointerElementType(), NewPointerPhi,
4881           Builder.CreateMul(
4882               StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4883               "vector.gep"));
4884       State.set(PhiR, GEP, Part);
4885     }
4886   }
4887   }
4888 }
4889 
4890 /// A helper function for checking whether an integer division-related
4891 /// instruction may divide by zero (in which case it must be predicated if
4892 /// executed conditionally in the scalar code).
4893 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing
/// the division, but can do so without predication.
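/// For example, `udiv i32 %a, %b` (and `udiv i32 %a, 0`) may divide by zero
/// and must be predicated if executed conditionally, while `udiv i32 %a, 7`
/// cannot, and needs no predication.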
4897 static bool mayDivideByZero(Instruction &I) {
4898   assert((I.getOpcode() == Instruction::UDiv ||
4899           I.getOpcode() == Instruction::SDiv ||
4900           I.getOpcode() == Instruction::URem ||
4901           I.getOpcode() == Instruction::SRem) &&
4902          "Unexpected instruction");
4903   Value *Divisor = I.getOperand(1);
4904   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4905   return !CInt || CInt->isZero();
4906 }
4907 
4908 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def,
4909                                            VPUser &User,
4910                                            VPTransformState &State) {
4911   switch (I.getOpcode()) {
4912   case Instruction::Call:
4913   case Instruction::Br:
4914   case Instruction::PHI:
4915   case Instruction::GetElementPtr:
4916   case Instruction::Select:
4917     llvm_unreachable("This instruction is handled by a different recipe.");
4918   case Instruction::UDiv:
4919   case Instruction::SDiv:
4920   case Instruction::SRem:
4921   case Instruction::URem:
4922   case Instruction::Add:
4923   case Instruction::FAdd:
4924   case Instruction::Sub:
4925   case Instruction::FSub:
4926   case Instruction::FNeg:
4927   case Instruction::Mul:
4928   case Instruction::FMul:
4929   case Instruction::FDiv:
4930   case Instruction::FRem:
4931   case Instruction::Shl:
4932   case Instruction::LShr:
4933   case Instruction::AShr:
4934   case Instruction::And:
4935   case Instruction::Or:
4936   case Instruction::Xor: {
4937     // Just widen unops and binops.
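    // For example (an illustrative sketch, assuming VF = 4 and UF = 1), the
    // scalar
    //   %sum = add nsw i32 %a, %b
    // becomes
    //   %sum = add nsw <4 x i32> %va, %vb
    // with the nsw flag carried over by copyIRFlags below.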
4938     setDebugLocFromInst(Builder, &I);
4939 
4940     for (unsigned Part = 0; Part < UF; ++Part) {
4941       SmallVector<Value *, 2> Ops;
4942       for (VPValue *VPOp : User.operands())
4943         Ops.push_back(State.get(VPOp, Part));
4944 
4945       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4946 
4947       if (auto *VecOp = dyn_cast<Instruction>(V))
4948         VecOp->copyIRFlags(&I);
4949 
4950       // Use this vector value for all users of the original instruction.
4951       State.set(Def, V, Part);
4952       addMetadata(V, &I);
4953     }
4954 
4955     break;
4956   }
4957   case Instruction::ICmp:
4958   case Instruction::FCmp: {
4959     // Widen compares. Generate vector compares.
4960     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4961     auto *Cmp = cast<CmpInst>(&I);
4962     setDebugLocFromInst(Builder, Cmp);
4963     for (unsigned Part = 0; Part < UF; ++Part) {
4964       Value *A = State.get(User.getOperand(0), Part);
4965       Value *B = State.get(User.getOperand(1), Part);
4966       Value *C = nullptr;
4967       if (FCmp) {
4968         // Propagate fast math flags.
4969         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4970         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4971         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4972       } else {
4973         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4974       }
4975       State.set(Def, C, Part);
4976       addMetadata(C, &I);
4977     }
4978 
4979     break;
4980   }
4981 
4982   case Instruction::ZExt:
4983   case Instruction::SExt:
4984   case Instruction::FPToUI:
4985   case Instruction::FPToSI:
4986   case Instruction::FPExt:
4987   case Instruction::PtrToInt:
4988   case Instruction::IntToPtr:
4989   case Instruction::SIToFP:
4990   case Instruction::UIToFP:
4991   case Instruction::Trunc:
4992   case Instruction::FPTrunc:
4993   case Instruction::BitCast: {
4994     auto *CI = cast<CastInst>(&I);
4995     setDebugLocFromInst(Builder, CI);
4996 
    // Vectorize casts.
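    // For example (an illustrative sketch, assuming VF = 4):
    //   %ext = sext i32 %x to i64
    // becomes
    //   %ext = sext <4 x i32> %vx to <4 x i64>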
4998     Type *DestTy =
4999         (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF);
5000 
5001     for (unsigned Part = 0; Part < UF; ++Part) {
5002       Value *A = State.get(User.getOperand(0), Part);
5003       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
5004       State.set(Def, Cast, Part);
5005       addMetadata(Cast, &I);
5006     }
5007     break;
5008   }
5009   default:
5010     // This instruction is not vectorized by simple widening.
5011     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
5012     llvm_unreachable("Unhandled instruction!");
5013   } // end of switch.
5014 }
5015 
5016 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
5017                                                VPUser &ArgOperands,
5018                                                VPTransformState &State) {
5019   assert(!isa<DbgInfoIntrinsic>(I) &&
5020          "DbgInfoIntrinsic should have been dropped during VPlan construction");
5021   setDebugLocFromInst(Builder, &I);
5022 
5023   Module *M = I.getParent()->getParent()->getParent();
5024   auto *CI = cast<CallInst>(&I);
5025 
5026   SmallVector<Type *, 4> Tys;
5027   for (Value *ArgOperand : CI->arg_operands())
5028     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
5029 
5030   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
5031 
  // The flag shows whether we should use an intrinsic or a usual call for
  // the vectorized version of the instruction, i.e., whether it is
  // beneficial to perform the intrinsic call rather than the lib call.
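  // For example (an illustrative sketch), a call to sinf may be widened
  // either to the llvm.sin.v4f32 intrinsic or to a vector math-library
  // routine known to the VFDatabase; whichever the cost model reports as
  // cheaper is used.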
5035   bool NeedToScalarize = false;
5036   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
5037   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
5038   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
5039   assert((UseVectorIntrinsic || !NeedToScalarize) &&
5040          "Instruction should be scalarized elsewhere.");
5041   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
5042          "Either the intrinsic cost or vector call cost must be valid");
5043 
5044   for (unsigned Part = 0; Part < UF; ++Part) {
5045     SmallVector<Value *, 4> Args;
5046     for (auto &I : enumerate(ArgOperands.operands())) {
5047       // Some intrinsics have a scalar argument - don't replace it with a
5048       // vector.
5049       Value *Arg;
5050       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
5051         Arg = State.get(I.value(), Part);
5052       else
5053         Arg = State.get(I.value(), VPIteration(0, 0));
5054       Args.push_back(Arg);
5055     }
5056 
5057     Function *VectorF;
5058     if (UseVectorIntrinsic) {
5059       // Use vector version of the intrinsic.
5060       Type *TysForDecl[] = {CI->getType()};
5061       if (VF.isVector())
5062         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
5063       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
5064       assert(VectorF && "Can't retrieve vector intrinsic.");
5065     } else {
5066       // Use vector version of the function call.
5067       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
5068 #ifndef NDEBUG
5069       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
5070              "Can't create vector function.");
5071 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    State.set(Def, V, Part);
    addMetadata(V, &I);
5083   }
5084 }
5085 
5086 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef,
5087                                                  VPUser &Operands,
5088                                                  bool InvariantCond,
5089                                                  VPTransformState &State) {
5090   setDebugLocFromInst(Builder, &I);
5091 
  // The condition can be loop invariant but still defined inside the
5093   // loop. This means that we can't just use the original 'cond' value.
5094   // We have to take the 'vectorized' value and pick the first lane.
5095   // Instcombine will make this a no-op.
5096   auto *InvarCond = InvariantCond
5097                         ? State.get(Operands.getOperand(0), VPIteration(0, 0))
5098                         : nullptr;
5099 
5100   for (unsigned Part = 0; Part < UF; ++Part) {
5101     Value *Cond =
5102         InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
5103     Value *Op0 = State.get(Operands.getOperand(1), Part);
5104     Value *Op1 = State.get(Operands.getOperand(2), Part);
5105     Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
5106     State.set(VPDef, Sel, Part);
5107     addMetadata(Sel, &I);
5108   }
5109 }
5110 
5111 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
5112   // We should not collect Scalars more than once per VF. Right now, this
5113   // function is called from collectUniformsAndScalars(), which already does
5114   // this check. Collecting Scalars for VF=1 does not make any sense.
5115   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
5116          "This function should not be visited twice for the same VF");
5117 
5118   SmallSetVector<Instruction *, 8> Worklist;
5119 
5120   // These sets are used to seed the analysis with pointers used by memory
5121   // accesses that will remain scalar.
5122   SmallSetVector<Instruction *, 8> ScalarPtrs;
5123   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
5124   auto *Latch = TheLoop->getLoopLatch();
5125 
5126   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
5127   // The pointer operands of loads and stores will be scalar as long as the
5128   // memory access is not a gather or scatter operation. The value operand of a
5129   // store will remain scalar if the store is scalarized.
5130   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
5131     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
5132     assert(WideningDecision != CM_Unknown &&
5133            "Widening decision should be ready at this moment");
5134     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
5135       if (Ptr == Store->getValueOperand())
5136         return WideningDecision == CM_Scalarize;
    assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value nor a pointer operand");
5139     return WideningDecision != CM_GatherScatter;
5140   };
5141 
5142   // A helper that returns true if the given value is a bitcast or
5143   // getelementptr instruction contained in the loop.
5144   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
5145     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
5146             isa<GetElementPtrInst>(V)) &&
5147            !TheLoop->isLoopInvariant(V);
5148   };
5149 
5150   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
5151     if (!isa<PHINode>(Ptr) ||
5152         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
5153       return false;
5154     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
5155     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
5156       return false;
5157     return isScalarUse(MemAccess, Ptr);
5158   };
5159 
  // A helper that evaluates a memory access's use of a pointer. If the
  // pointer is the pointer induction of a loop, it is inserted into
  // Worklist. If the use will be a scalar use, and the pointer is only used
  // by memory accesses, we place the pointer in ScalarPtrs. Otherwise, the
  // pointer is placed in PossibleNonScalarPtrs.
5165   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5166     if (isScalarPtrInduction(MemAccess, Ptr)) {
5167       Worklist.insert(cast<Instruction>(Ptr));
5168       Instruction *Update = cast<Instruction>(
5169           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
5170       Worklist.insert(Update);
5171       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
5172                         << "\n");
5173       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
5174                         << "\n");
5175       return;
5176     }
5177     // We only care about bitcast and getelementptr instructions contained in
5178     // the loop.
5179     if (!isLoopVaryingBitCastOrGEP(Ptr))
5180       return;
5181 
5182     // If the pointer has already been identified as scalar (e.g., if it was
5183     // also identified as uniform), there's nothing to do.
5184     auto *I = cast<Instruction>(Ptr);
5185     if (Worklist.count(I))
5186       return;
5187 
5188     // If the use of the pointer will be a scalar use, and all users of the
5189     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5190     // place the pointer in PossibleNonScalarPtrs.
5191     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
5192           return isa<LoadInst>(U) || isa<StoreInst>(U);
5193         }))
5194       ScalarPtrs.insert(I);
5195     else
5196       PossibleNonScalarPtrs.insert(I);
5197   };
5198 
  // We seed the scalars analysis with two classes of instructions: (1)
  // instructions marked uniform-after-vectorization and (2) bitcast,
  // getelementptr and (pointer) phi instructions used by memory accesses
  // requiring a scalar use.
5203   //
5204   // (1) Add to the worklist all instructions that have been identified as
5205   // uniform-after-vectorization.
5206   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5207 
5208   // (2) Add to the worklist all bitcast and getelementptr instructions used by
5209   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
5211   // scatter operation. The value operand of a store will remain scalar if the
5212   // store is scalarized.
5213   for (auto *BB : TheLoop->blocks())
5214     for (auto &I : *BB) {
5215       if (auto *Load = dyn_cast<LoadInst>(&I)) {
5216         evaluatePtrUse(Load, Load->getPointerOperand());
5217       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
5218         evaluatePtrUse(Store, Store->getPointerOperand());
5219         evaluatePtrUse(Store, Store->getValueOperand());
5220       }
5221     }
5222   for (auto *I : ScalarPtrs)
5223     if (!PossibleNonScalarPtrs.count(I)) {
5224       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
5225       Worklist.insert(I);
5226     }
5227 
5228   // Insert the forced scalars.
5229   // FIXME: Currently widenPHIInstruction() often creates a dead vector
5230   // induction variable when the PHI user is scalarized.
5231   auto ForcedScalar = ForcedScalars.find(VF);
5232   if (ForcedScalar != ForcedScalars.end())
5233     for (auto *I : ForcedScalar->second)
5234       Worklist.insert(I);
5235 
5236   // Expand the worklist by looking through any bitcasts and getelementptr
5237   // instructions we've already identified as scalar. This is similar to the
5238   // expansion step in collectLoopUniforms(); however, here we're only
5239   // expanding to include additional bitcasts and getelementptr instructions.
5240   unsigned Idx = 0;
5241   while (Idx != Worklist.size()) {
5242     Instruction *Dst = Worklist[Idx++];
5243     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
5244       continue;
5245     auto *Src = cast<Instruction>(Dst->getOperand(0));
5246     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
5247           auto *J = cast<Instruction>(U);
5248           return !TheLoop->contains(J) || Worklist.count(J) ||
5249                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
5250                   isScalarUse(J, Src));
5251         })) {
5252       Worklist.insert(Src);
5253       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5254     }
5255   }
5256 
5257   // An induction variable will remain scalar if all users of the induction
5258   // variable and induction variable update remain scalar.
5259   for (auto &Induction : Legal->getInductionVars()) {
5260     auto *Ind = Induction.first;
5261     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5262 
5263     // If tail-folding is applied, the primary induction variable will be used
5264     // to feed a vector compare.
5265     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
5266       continue;
5267 
5268     // Determine if all users of the induction variable are scalar after
5269     // vectorization.
5270     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5271       auto *I = cast<Instruction>(U);
5272       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
5273     });
5274     if (!ScalarInd)
5275       continue;
5276 
5277     // Determine if all users of the induction variable update instruction are
5278     // scalar after vectorization.
5279     auto ScalarIndUpdate =
5280         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5281           auto *I = cast<Instruction>(U);
5282           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
5283         });
5284     if (!ScalarIndUpdate)
5285       continue;
5286 
5287     // The induction variable and its update instruction will remain scalar.
5288     Worklist.insert(Ind);
5289     Worklist.insert(IndUpdate);
5290     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5291     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
5292                       << "\n");
5293   }
5294 
5295   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5296 }
5297 
5298 bool LoopVectorizationCostModel::isScalarWithPredication(
5299     Instruction *I, ElementCount VF) const {
5300   if (!blockNeedsPredication(I->getParent()))
5301     return false;
  switch (I->getOpcode()) {
5303   default:
5304     break;
5305   case Instruction::Load:
5306   case Instruction::Store: {
5307     if (!Legal->isMaskRequired(I))
5308       return false;
5309     auto *Ptr = getLoadStorePointerOperand(I);
5310     auto *Ty = getMemInstValueType(I);
5311     // We have already decided how to vectorize this instruction, get that
5312     // result.
5313     if (VF.isVector()) {
5314       InstWidening WideningDecision = getWideningDecision(I, VF);
5315       assert(WideningDecision != CM_Unknown &&
5316              "Widening decision should be ready at this moment");
5317       return WideningDecision == CM_Scalarize;
5318     }
5319     const Align Alignment = getLoadStoreAlignment(I);
5320     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
5321                                 isLegalMaskedGather(Ty, Alignment))
5322                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
5323                                 isLegalMaskedScatter(Ty, Alignment));
5324   }
5325   case Instruction::UDiv:
5326   case Instruction::SDiv:
5327   case Instruction::SRem:
5328   case Instruction::URem:
5329     return mayDivideByZero(*I);
5330   }
5331   return false;
5332 }
5333 
5334 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
5335     Instruction *I, ElementCount VF) {
5336   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
5337   assert(getWideningDecision(I, VF) == CM_Unknown &&
5338          "Decision should not be set yet.");
5339   auto *Group = getInterleavedAccessGroup(I);
5340   assert(Group && "Must have a group.");
5341 
  // If the instruction's allocated size doesn't equal its type size, it
5343   // requires padding and will be scalarized.
5344   auto &DL = I->getModule()->getDataLayout();
5345   auto *ScalarTy = getMemInstValueType(I);
5346   if (hasIrregularType(ScalarTy, DL))
5347     return false;
5348 
5349   // Check if masking is required.
5350   // A Group may need masking for one of two reasons: it resides in a block that
5351   // needs predication, or it was decided to use masking to deal with gaps.
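  // For example (an illustrative sketch), a factor-2 load group that only
  // accesses the even elements of each pair has a gap; without a scalar
  // epilogue, the last wide access could read past the end of the underlying
  // data unless it is masked.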
5352   bool PredicatedAccessRequiresMasking =
5353       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
5354   bool AccessWithGapsRequiresMasking =
5355       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
5356   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
5357     return true;
5358 
5359   // If masked interleaving is required, we expect that the user/target had
5360   // enabled it, because otherwise it either wouldn't have been created or
5361   // it should have been invalidated by the CostModel.
5362   assert(useMaskedInterleavedAccesses(TTI) &&
5363          "Masked interleave-groups for predicated accesses are not enabled.");
5364 
5365   auto *Ty = getMemInstValueType(I);
5366   const Align Alignment = getLoadStoreAlignment(I);
5367   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
5368                           : TTI.isLegalMaskedStore(Ty, Alignment);
5369 }
5370 
5371 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
5372     Instruction *I, ElementCount VF) {
5373   // Get and ensure we have a valid memory instruction.
5374   LoadInst *LI = dyn_cast<LoadInst>(I);
5375   StoreInst *SI = dyn_cast<StoreInst>(I);
5376   assert((LI || SI) && "Invalid memory instruction");
5377 
5378   auto *Ptr = getLoadStorePointerOperand(I);
5379 
5380   // In order to be widened, the pointer should be consecutive, first of all.
5381   if (!Legal->isConsecutivePtr(Ptr))
5382     return false;
5383 
5384   // If the instruction is a store located in a predicated block, it will be
5385   // scalarized.
5386   if (isScalarWithPredication(I))
5387     return false;
5388 
  // If the instruction's allocated size doesn't equal its type size, it
5390   // requires padding and will be scalarized.
5391   auto &DL = I->getModule()->getDataLayout();
5392   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5393   if (hasIrregularType(ScalarTy, DL))
5394     return false;
5395 
5396   return true;
5397 }
5398 
5399 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
5400   // We should not collect Uniforms more than once per VF. Right now,
5401   // this function is called from collectUniformsAndScalars(), which
5402   // already does this check. Collecting Uniforms for VF=1 does not make any
5403   // sense.
5404 
5405   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5406          "This function should not be visited twice for the same VF");
5407 
  // Visit the list of Uniforms. Even if we find no uniform value, we won't
  // analyze this VF again: Uniforms.count(VF) will return 1.
5410   Uniforms[VF].clear();
5411 
5412   // We now know that the loop is vectorizable!
5413   // Collect instructions inside the loop that will remain uniform after
5414   // vectorization.
5415 
  // Global values, params and instructions outside of the current loop are
  // out of scope.
5418   auto isOutOfScope = [&](Value *V) -> bool {
5419     Instruction *I = dyn_cast<Instruction>(V);
5420     return (!I || !TheLoop->contains(I));
5421   };
5422 
5423   SetVector<Instruction *> Worklist;
5424   BasicBlock *Latch = TheLoop->getLoopLatch();
5425 
5426   // Instructions that are scalar with predication must not be considered
5427   // uniform after vectorization, because that would create an erroneous
5428   // replicating region where only a single instance out of VF should be formed.
5429   // TODO: optimize such seldom cases if found important, see PR40816.
5430   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5431     if (isOutOfScope(I)) {
5432       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5433                         << *I << "\n");
5434       return;
5435     }
5436     if (isScalarWithPredication(I, VF)) {
5437       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5438                         << *I << "\n");
5439       return;
5440     }
5441     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5442     Worklist.insert(I);
5443   };
5444 
5445   // Start with the conditional branch. If the branch condition is an
5446   // instruction contained in the loop that is only used by the branch, it is
5447   // uniform.
5448   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5449   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5450     addToWorklistIfAllowed(Cmp);
5451 
5452   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5453     InstWidening WideningDecision = getWideningDecision(I, VF);
5454     assert(WideningDecision != CM_Unknown &&
5455            "Widening decision should be ready at this moment");
5456 
5457     // A uniform memory op is itself uniform.  We exclude uniform stores
5458     // here as they demand the last lane, not the first one.
5459     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5460       assert(WideningDecision == CM_Scalarize);
5461       return true;
5462     }
5463 
5464     return (WideningDecision == CM_Widen ||
5465             WideningDecision == CM_Widen_Reverse ||
5466             WideningDecision == CM_Interleave);
5467   };
5468 
5469 
5470   // Returns true if Ptr is the pointer operand of a memory access instruction
5471   // I, and I is known to not require scalarization.
5472   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5473     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5474   };
5475 
5476   // Holds a list of values which are known to have at least one uniform use.
5477   // Note that there may be other uses which aren't uniform.  A "uniform use"
5478   // here is something which only demands lane 0 of the unrolled iterations;
5479   // it does not imply that all lanes produce the same value (e.g. this is not
5480   // the usual meaning of uniform)
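  // For example, the pointer operand of a load that will be widened into a
  // single contiguous vector load only demands lane 0, since the widened
  // load is emitted from the first lane's address.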
5481   SetVector<Value *> HasUniformUse;
5482 
5483   // Scan the loop for instructions which are either a) known to have only
5484   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5485   for (auto *BB : TheLoop->blocks())
5486     for (auto &I : *BB) {
5487       // If there's no pointer operand, there's nothing to do.
5488       auto *Ptr = getLoadStorePointerOperand(&I);
5489       if (!Ptr)
5490         continue;
5491 
5492       // A uniform memory op is itself uniform.  We exclude uniform stores
5493       // here as they demand the last lane, not the first one.
5494       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5495         addToWorklistIfAllowed(&I);
5496 
5497       if (isUniformDecision(&I, VF)) {
5498         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5499         HasUniformUse.insert(Ptr);
5500       }
5501     }
5502 
5503   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5504   // demanding) users.  Since loops are assumed to be in LCSSA form, this
5505   // disallows uses outside the loop as well.
5506   for (auto *V : HasUniformUse) {
5507     if (isOutOfScope(V))
5508       continue;
5509     auto *I = cast<Instruction>(V);
5510     auto UsersAreMemAccesses =
5511       llvm::all_of(I->users(), [&](User *U) -> bool {
5512         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5513       });
5514     if (UsersAreMemAccesses)
5515       addToWorklistIfAllowed(I);
5516   }
5517 
  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // a uniform instruction will only be used by uniform instructions.
5521   unsigned idx = 0;
5522   while (idx != Worklist.size()) {
5523     Instruction *I = Worklist[idx++];
5524 
5525     for (auto OV : I->operand_values()) {
5526       // isOutOfScope operands cannot be uniform instructions.
5527       if (isOutOfScope(OV))
5528         continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
5531       auto *OP = dyn_cast<PHINode>(OV);
5532       if (OP && Legal->isFirstOrderRecurrence(OP))
5533         continue;
5534       // If all the users of the operand are uniform, then add the
5535       // operand into the uniform worklist.
5536       auto *OI = cast<Instruction>(OV);
5537       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5538             auto *J = cast<Instruction>(U);
5539             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5540           }))
5541         addToWorklistIfAllowed(OI);
5542     }
5543   }
5544 
5545   // For an instruction to be added into Worklist above, all its users inside
5546   // the loop should also be in Worklist. However, this condition cannot be
5547   // true for phi nodes that form a cyclic dependence. We must process phi
5548   // nodes separately. An induction variable will remain uniform if all users
5549   // of the induction variable and induction variable update remain uniform.
5550   // The code below handles both pointer and non-pointer induction variables.
5551   for (auto &Induction : Legal->getInductionVars()) {
5552     auto *Ind = Induction.first;
5553     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5554 
5555     // Determine if all users of the induction variable are uniform after
5556     // vectorization.
5557     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5558       auto *I = cast<Instruction>(U);
5559       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5560              isVectorizedMemAccessUse(I, Ind);
5561     });
5562     if (!UniformInd)
5563       continue;
5564 
5565     // Determine if all users of the induction variable update instruction are
5566     // uniform after vectorization.
5567     auto UniformIndUpdate =
5568         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5569           auto *I = cast<Instruction>(U);
5570           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5571                  isVectorizedMemAccessUse(I, IndUpdate);
5572         });
5573     if (!UniformIndUpdate)
5574       continue;
5575 
5576     // The induction variable and its update instruction will remain uniform.
5577     addToWorklistIfAllowed(Ind);
5578     addToWorklistIfAllowed(IndUpdate);
5579   }
5580 
5581   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5582 }
5583 
5584 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5585   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5586 
5587   if (Legal->getRuntimePointerChecking()->Need) {
5588     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5589         "runtime pointer checks needed. Enable vectorization of this "
5590         "loop with '#pragma clang loop vectorize(enable)' when "
5591         "compiling with -Os/-Oz",
5592         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5593     return true;
5594   }
5595 
5596   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5597     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5598         "runtime SCEV checks needed. Enable vectorization of this "
5599         "loop with '#pragma clang loop vectorize(enable)' when "
5600         "compiling with -Os/-Oz",
5601         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5602     return true;
5603   }
5604 
5605   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5606   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5607     reportVectorizationFailure("Runtime stride check for small trip count",
5608         "runtime stride == 1 checks needed. Enable vectorization of "
5609         "this loop without such check by compiling with -Os/-Oz",
5610         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5611     return true;
5612   }
5613 
5614   return false;
5615 }
5616 
5617 ElementCount
5618 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
5619   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
5620     reportVectorizationInfo(
5621         "Disabling scalable vectorization, because target does not "
5622         "support scalable vectors.",
5623         "ScalableVectorsUnsupported", ORE, TheLoop);
5624     return ElementCount::getScalable(0);
5625   }
5626 
5627   auto MaxScalableVF = ElementCount::getScalable(
5628       std::numeric_limits<ElementCount::ScalarTy>::max());
5629 
5630   // Disable scalable vectorization if the loop contains unsupported reductions.
5631   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
5632   // FIXME: While for scalable vectors this is currently sufficient, this should
5633   // be replaced by a more detailed mechanism that filters out specific VFs,
5634   // instead of invalidating vectorization for a whole set of VFs based on the
5635   // MaxVF.
5636   if (!canVectorizeReductions(MaxScalableVF)) {
5637     reportVectorizationInfo(
5638         "Scalable vectorization not supported for the reduction "
5639         "operations found in this loop.",
5640         "ScalableVFUnfeasible", ORE, TheLoop);
5641     return ElementCount::getScalable(0);
5642   }
5643 
5644   if (Legal->isSafeForAnyVectorWidth())
5645     return MaxScalableVF;
5646 
5647   // Limit MaxScalableVF by the maximum safe dependence distance.
5648   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5649   MaxScalableVF = ElementCount::getScalable(
5650       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
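  // For example (an illustrative sketch), MaxSafeElements = 32 with a target
  // maximum vscale of 16 yields a largest safe scalable VF of vscale x 2:
  // even at the maximum vscale, vscale x 2 never exceeds 32 elements.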
5651   if (!MaxScalableVF)
5652     reportVectorizationInfo(
5653         "Max legal vector width too small, scalable vectorization "
5654         "unfeasible.",
5655         "ScalableVFUnfeasible", ORE, TheLoop);
5656 
5657   return MaxScalableVF;
5658 }
5659 
5660 ElementCount
5661 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
5662                                                  ElementCount UserVF) {
5663   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5664   unsigned SmallestType, WidestType;
5665   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5666 
5667   // Get the maximum safe dependence distance in bits computed by LAA.
5668   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
5670   // dependence distance).
5671   unsigned MaxSafeElements =
5672       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
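  // For example (an illustrative sketch), a maximum safe width of 256 bits
  // with a widest type of i32 gives MaxSafeElements =
  // PowerOf2Floor(256 / 32) = 8.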
5673 
5674   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
5675   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
5676 
5677   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
5678                     << ".\n");
5679   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
5680                     << ".\n");
5681 
5682   // First analyze the UserVF, fall back if the UserVF should be ignored.
5683   if (UserVF) {
5684     auto MaxSafeUserVF =
5685         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
5686 
5687     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF))
5688       return UserVF;
5689 
5690     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
5691 
5692     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
5693     // is better to ignore the hint and let the compiler choose a suitable VF.
5694     if (!UserVF.isScalable()) {
5695       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5696                         << " is unsafe, clamping to max safe VF="
5697                         << MaxSafeFixedVF << ".\n");
5698       ORE->emit([&]() {
5699         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5700                                           TheLoop->getStartLoc(),
5701                                           TheLoop->getHeader())
5702                << "User-specified vectorization factor "
5703                << ore::NV("UserVectorizationFactor", UserVF)
5704                << " is unsafe, clamping to maximum safe vectorization factor "
5705                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
5706       });
5707       return MaxSafeFixedVF;
5708     }
5709 
5710     LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5711                       << " is unsafe. Ignoring scalable UserVF.\n");
5712     ORE->emit([&]() {
5713       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5714                                         TheLoop->getStartLoc(),
5715                                         TheLoop->getHeader())
5716              << "User-specified vectorization factor "
5717              << ore::NV("UserVectorizationFactor", UserVF)
5718              << " is unsafe. Ignoring the hint to let the compiler pick a "
5719                 "suitable VF.";
5720     });
5721   }
5722 
5723   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5724                     << " / " << WidestType << " bits.\n");
5725 
5726   ElementCount MaxFixedVF = ElementCount::getFixed(1);
5727   if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
5728                                            WidestType, MaxSafeFixedVF))
5729     MaxFixedVF = MaxVF;
5730 
5731   if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
5732                                            WidestType, MaxSafeScalableVF))
5733     // FIXME: Return scalable VF as well (to be added in future patch).
5734     if (MaxVF.isScalable())
5735       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5736                         << "\n");
5737 
5738   return MaxFixedVF;
5739 }
5740 
5741 Optional<ElementCount>
5742 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5743   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this, since the check is still likely to
    // be dynamically uniform if the target can skip it.
5746     reportVectorizationFailure(
5747         "Not inserting runtime ptr check for divergent target",
5748         "runtime pointer checks needed. Not enabled for divergent target",
5749         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5750     return None;
5751   }
5752 
5753   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5754   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5755   if (TC == 1) {
5756     reportVectorizationFailure("Single iteration (non) loop",
5757         "loop trip count is one, irrelevant for vectorization",
5758         "SingleIterationLoop", ORE, TheLoop);
5759     return None;
5760   }
5761 
5762   switch (ScalarEpilogueStatus) {
5763   case CM_ScalarEpilogueAllowed:
5764     return computeFeasibleMaxVF(TC, UserVF);
5765   case CM_ScalarEpilogueNotAllowedUsePredicate:
5766     LLVM_FALLTHROUGH;
5767   case CM_ScalarEpilogueNotNeededUsePredicate:
5768     LLVM_DEBUG(
5769         dbgs() << "LV: vector predicate hint/switch found.\n"
5770                << "LV: Not allowing scalar epilogue, creating predicated "
5771                << "vector loop.\n");
5772     break;
5773   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5774     // fallthrough as a special case of OptForSize
5775   case CM_ScalarEpilogueNotAllowedOptSize:
5776     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5777       LLVM_DEBUG(
5778           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5779     else
5780       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5781                         << "count.\n");
5782 
5783     // Bail if runtime checks are required, which are not good when optimising
5784     // for size.
5785     if (runtimeChecksRequired())
5786       return None;
5787 
5788     break;
5789   }
5790 
5791   // The only loops we can vectorize without a scalar epilogue, are loops with
5792   // a bottom-test and a single exiting block. We'd have to handle the fact
5793   // that not every instruction executes on the last iteration.  This will
5794   // require a lane mask which varies through the vector loop body.  (TODO)
5795   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5796     // If there was a tail-folding hint/switch, but we can't fold the tail by
5797     // masking, fallback to a vectorization with a scalar epilogue.
5798     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5799       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5800                            "scalar epilogue instead.\n");
5801       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5802       return computeFeasibleMaxVF(TC, UserVF);
5803     }
5804     return None;
5805   }
5806 
  // Now try to fold the tail by masking.
5808 
5809   // Invalidate interleave groups that require an epilogue if we can't mask
5810   // the interleave-group.
5811   if (!useMaskedInterleavedAccesses(TTI)) {
5812     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5813            "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
5816     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5817   }
5818 
5819   ElementCount MaxVF = computeFeasibleMaxVF(TC, UserVF);
5820   assert(!MaxVF.isScalable() &&
5821          "Scalable vectors do not yet support tail folding");
5822   assert((UserVF.isNonZero() || isPowerOf2_32(MaxVF.getFixedValue())) &&
5823          "MaxVF must be a power of 2");
5824   unsigned MaxVFtimesIC =
5825       UserIC ? MaxVF.getFixedValue() * UserIC : MaxVF.getFixedValue();
  // Avoid tail folding if the trip count is known to be a multiple of any VF
  // we might choose.
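  // For example (an illustrative sketch), a known trip count of 1024 with
  // MaxVF = 8 and UserIC = 2 gives 1024 % 16 == 0, so no tail remains; a
  // trip count of 1000 would leave a tail that must be folded by masking or
  // handled by a scalar epilogue.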
5828   ScalarEvolution *SE = PSE.getSE();
5829   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5830   const SCEV *ExitCount = SE->getAddExpr(
5831       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5832   const SCEV *Rem = SE->getURemExpr(
5833       SE->applyLoopGuards(ExitCount, TheLoop),
5834       SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5835   if (Rem->isZero()) {
5836     // Accept MaxVF if we do not have a tail.
5837     LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5838     return MaxVF;
5839   }
5840 
5841   // If we don't know the precise trip count, or if the trip count that we
5842   // found modulo the vectorization factor is not zero, try to fold the tail
5843   // by masking.
5844   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5845   if (Legal->prepareToFoldTailByMasking()) {
5846     FoldTailByMasking = true;
5847     return MaxVF;
5848   }
5849 
5850   // If there was a tail-folding hint/switch, but we can't fold the tail by
5851   // masking, fallback to a vectorization with a scalar epilogue.
5852   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5853     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5854                          "scalar epilogue instead.\n");
5855     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5856     return MaxVF;
5857   }
5858 
5859   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5860     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5861     return None;
5862   }
5863 
5864   if (TC == 0) {
5865     reportVectorizationFailure(
5866         "Unable to calculate the loop count due to complex control flow",
5867         "unable to calculate the loop count due to complex control flow",
5868         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5869     return None;
5870   }
5871 
5872   reportVectorizationFailure(
5873       "Cannot optimize for size and vectorize at the same time.",
5874       "cannot optimize for size and vectorize at the same time. "
5875       "Enable vectorization of this loop with '#pragma clang loop "
5876       "vectorize(enable)' when compiling with -Os/-Oz",
5877       "NoTailLoopWithOptForSize", ORE, TheLoop);
5878   return None;
5879 }
5880 
5881 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5882     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5883     const ElementCount &MaxSafeVF) {
5884   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5885   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5886       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5887                            : TargetTransformInfo::RGK_FixedWidthVector);
5888 
5889   // Convenience function to return the minimum of two ElementCounts.
5890   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5891     assert((LHS.isScalable() == RHS.isScalable()) &&
5892            "Scalable flags must match");
5893     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5894   };
5895 
5896   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
5898   auto MaxVectorElementCount = ElementCount::get(
5899       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5900       ComputeScalableMaxVF);
5901   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5902   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5903                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5904 
5905   if (!MaxVectorElementCount) {
5906     LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
5907     return ElementCount::getFixed(1);
5908   }
5909 
5910   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5911   if (ConstTripCount &&
5912       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5913       isPowerOf2_32(ConstTripCount)) {
5914     // We need to clamp the VF to be the ConstTripCount. There is no point in
5915     // choosing a higher viable VF as done in the loop below. If
5916     // MaxVectorElementCount is scalable, we only fall back on a fixed VF when
5917     // the TC is less than or equal to the known number of lanes.
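    // For example (an illustrative sketch), a constant trip count of 8 with
    // MaxVectorElementCount = 16 clamps the VF to 8; a wider VF would leave
    // half of the vector lanes unused on the single iteration.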
5918     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5919                       << ConstTripCount << "\n");
5920     return TripCountEC;
5921   }
5922 
5923   ElementCount MaxVF = MaxVectorElementCount;
5924   if (TTI.shouldMaximizeVectorBandwidth() ||
5925       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5926     auto MaxVectorElementCountMaxBW = ElementCount::get(
5927         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5928         ComputeScalableMaxVF);
5929     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5930 
5931     // Collect all viable vectorization factors larger than the default MaxVF
5932     // (i.e. MaxVectorElementCount).
5933     SmallVector<ElementCount, 8> VFs;
5934     for (ElementCount VS = MaxVectorElementCount * 2;
5935          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5936       VFs.push_back(VS);
5937 
5938     // For each VF calculate its register usage.
5939     auto RUs = calculateRegisterUsage(VFs);
5940 
5941     // Select the largest VF which doesn't require more registers than existing
5942     // ones.
5943     for (int i = RUs.size() - 1; i >= 0; --i) {
5944       bool Selected = true;
5945       for (auto &pair : RUs[i].MaxLocalUsers) {
5946         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5947         if (pair.second > TargetNumRegisters)
5948           Selected = false;
5949       }
5950       if (Selected) {
5951         MaxVF = VFs[i];
5952         break;
5953       }
5954     }
5955     if (ElementCount MinVF =
5956             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
5957       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5958         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5959                           << ") with target's minimum: " << MinVF << '\n');
5960         MaxVF = MinVF;
5961       }
5962     }
5963   }
5964   return MaxVF;
5965 }
5966 
5967 bool LoopVectorizationCostModel::isMoreProfitable(
5968     const VectorizationFactor &A, const VectorizationFactor &B) const {
5969   InstructionCost::CostType CostA = *A.Cost.getValue();
5970   InstructionCost::CostType CostB = *B.Cost.getValue();
5971 
5972   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
5973 
5974   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
5975       MaxTripCount) {
    // If we are folding the tail and the trip count is a known (possibly small)
    // constant, the trip count will be rounded up to an integer number of
    // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF),
    // which we compare directly. When not folding the tail, the total cost will
    // be PerIterationCost*floor(TC/VF) + the scalar remainder cost, and so is
    // approximated with the per-lane cost below instead of using the trip
    // count as here.
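    // For example (illustrative): with MaxTripCount = 5, a factor with VF 4
    // costs CostA * ceil(5/4) = CostA * 2 in total, while a factor with VF 2
    // costs CostB * ceil(5/2) = CostB * 3.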
5983     int64_t RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
5984     int64_t RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
5985     return RTCostA < RTCostB;
5986   }
5987 
5988   // To avoid the need for FP division:
5989   //      (CostA / A.Width) < (CostB / B.Width)
5990   // <=>  (CostA * B.Width) < (CostB * A.Width)
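  // For example (illustrative): for A = {VF 4, Cost 10} and B = {VF 8,
  // Cost 16}, we compare 10 * 8 = 80 against 16 * 4 = 64; since 80 < 64 is
  // false, A (2.5 per lane) is not more profitable than B (2.0 per lane).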
5991   return (CostA * B.Width.getKnownMinValue()) <
5992          (CostB * A.Width.getKnownMinValue());
5993 }
5994 
5995 VectorizationFactor
5996 LoopVectorizationCostModel::selectVectorizationFactor(ElementCount MaxVF) {
5997   // FIXME: This can be fixed for scalable vectors later, because at this stage
5998   // the LoopVectorizer will only consider vectorizing a loop with scalable
5999   // vectors when the loop has a hint to enable vectorization for a given VF.
6000   assert(!MaxVF.isScalable() && "scalable vectors not yet supported");
6001 
6002   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
6003   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
6004   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
6005 
6006   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
6007   VectorizationFactor ChosenFactor = ScalarCost;
6008 
6009   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
6010   if (ForceVectorization && MaxVF.isVector()) {
6011     // Ignore scalar width, because the user explicitly wants vectorization.
6012     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
6013     // evaluation.
6014     ChosenFactor.Cost = std::numeric_limits<InstructionCost::CostType>::max();
6015   }
6016 
6017   for (auto i = ElementCount::getFixed(2); ElementCount::isKnownLE(i, MaxVF);
6018        i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
6022     VectorizationCostTy C = expectedCost(i);
6023 
6024     assert(C.first.isValid() && "Unexpected invalid cost for vector loop");
6025     VectorizationFactor Candidate(i, C.first);
6026     LLVM_DEBUG(
6027         dbgs() << "LV: Vector loop of width " << i << " costs: "
6028                << (*Candidate.Cost.getValue() / Candidate.Width.getFixedValue())
6029                << ".\n");
6030 
6031     if (!C.second && !ForceVectorization) {
6032       LLVM_DEBUG(
6033           dbgs() << "LV: Not considering vector loop of width " << i
6034                  << " because it will not generate any vector instructions.\n");
6035       continue;
6036     }
6037 
    // If profitable, add it to the ProfitableVFs list.
6039     if (isMoreProfitable(Candidate, ScalarCost))
6040       ProfitableVFs.push_back(Candidate);
6041 
6042     if (isMoreProfitable(Candidate, ChosenFactor))
6043       ChosenFactor = Candidate;
6044   }
6045 
6046   if (!EnableCondStoresVectorization && NumPredStores) {
6047     reportVectorizationFailure("There are conditional stores.",
6048         "store that is conditionally executed prevents vectorization",
6049         "ConditionalStore", ORE, TheLoop);
6050     ChosenFactor = ScalarCost;
6051   }
6052 
6053   LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
6054                  *ChosenFactor.Cost.getValue() >= *ScalarCost.Cost.getValue())
6055                  dbgs()
6056              << "LV: Vectorization seems to be not beneficial, "
6057              << "but was forced by a user.\n");
6058   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
6059   return ChosenFactor;
6060 }
6061 
6062 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
6063     const Loop &L, ElementCount VF) const {
6064   // Cross iteration phis such as reductions need special handling and are
6065   // currently unsupported.
6066   if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
6067         return Legal->isFirstOrderRecurrence(&Phi) ||
6068                Legal->isReductionVariable(&Phi);
6069       }))
6070     return false;
6071 
6072   // Phis with uses outside of the loop require special handling and are
6073   // currently unsupported.
6074   for (auto &Entry : Legal->getInductionVars()) {
6075     // Look for uses of the value of the induction at the last iteration.
6076     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
6077     for (User *U : PostInc->users())
6078       if (!L.contains(cast<Instruction>(U)))
6079         return false;
    // Look for uses of the penultimate value of the induction.
6081     for (User *U : Entry.first->users())
6082       if (!L.contains(cast<Instruction>(U)))
6083         return false;
6084   }
6085 
6086   // Induction variables that are widened require special handling that is
6087   // currently not supported.
6088   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
6089         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
6090                  this->isProfitableToScalarize(Entry.first, VF));
6091       }))
6092     return false;
6093 
6094   return true;
6095 }
6096 
6097 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
6098     const ElementCount VF) const {
6099   // FIXME: We need a much better cost-model to take different parameters such
6100   // as register pressure, code size increase and cost of extra branches into
6101   // account. For now we apply a very crude heuristic and only consider loops
6102   // with vectorization factors larger than a certain value.
6103   // We also consider epilogue vectorization unprofitable for targets that don't
  // consider interleaving beneficial (e.g. MVE).
6105   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
6106     return false;
  return VF.getFixedValue() >= EpilogueVectorizationMinVF;
6110 }
6111 
6112 VectorizationFactor
6113 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
6114     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
6115   VectorizationFactor Result = VectorizationFactor::Disabled();
6116   if (!EnableEpilogueVectorization) {
6117     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
6118     return Result;
6119   }
6120 
6121   if (!isScalarEpilogueAllowed()) {
6122     LLVM_DEBUG(
6123         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
6124                   "allowed.\n";);
6125     return Result;
6126   }
6127 
6128   // FIXME: This can be fixed for scalable vectors later, because at this stage
6129   // the LoopVectorizer will only consider vectorizing a loop with scalable
6130   // vectors when the loop has a hint to enable vectorization for a given VF.
6131   if (MainLoopVF.isScalable()) {
6132     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
6133                          "yet supported.\n");
6134     return Result;
6135   }
6136 
6137   // Not really a cost consideration, but check for unsupported cases here to
6138   // simplify the logic.
6139   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
6140     LLVM_DEBUG(
6141         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
6142                   "not a supported candidate.\n";);
6143     return Result;
6144   }
6145 
6146   if (EpilogueVectorizationForceVF > 1) {
6147     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
6148     if (LVP.hasPlanWithVFs(
6149             {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)}))
6150       return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0};
    LLVM_DEBUG(
        dbgs()
        << "LEV: Epilogue vectorization forced factor is not viable.\n";);
    return Result;
6157   }
6158 
6159   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
6160       TheLoop->getHeader()->getParent()->hasMinSize()) {
6161     LLVM_DEBUG(
6162         dbgs()
6163             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
6164     return Result;
6165   }
6166 
6167   if (!isEpilogueVectorizationProfitable(MainLoopVF))
6168     return Result;
6169 
6170   for (auto &NextVF : ProfitableVFs)
6171     if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) &&
6172         (Result.Width.getFixedValue() == 1 ||
6173          isMoreProfitable(NextVF, Result)) &&
6174         LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width}))
6175       Result = NextVF;
6176 
6177   if (Result != VectorizationFactor::Disabled())
6178     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
6179                       << Result.Width.getFixedValue() << "\n";);
6180   return Result;
6181 }
6182 
6183 std::pair<unsigned, unsigned>
6184 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
6185   unsigned MinWidth = -1U;
6186   unsigned MaxWidth = 8;
6187   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6188 
6189   // For each block.
6190   for (BasicBlock *BB : TheLoop->blocks()) {
6191     // For each instruction in the loop.
6192     for (Instruction &I : BB->instructionsWithoutDebug()) {
6193       Type *T = I.getType();
6194 
6195       // Skip ignored values.
6196       if (ValuesToIgnore.count(&I))
6197         continue;
6198 
6199       // Only examine Loads, Stores and PHINodes.
6200       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6201         continue;
6202 
6203       // Examine PHI nodes that are reduction variables. Update the type to
6204       // account for the recurrence type.
6205       if (auto *PN = dyn_cast<PHINode>(&I)) {
6206         if (!Legal->isReductionVariable(PN))
6207           continue;
6208         RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
6209         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
6210             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
6211                                       RdxDesc.getRecurrenceType(),
6212                                       TargetTransformInfo::ReductionFlags()))
6213           continue;
6214         T = RdxDesc.getRecurrenceType();
6215       }
6216 
6217       // Examine the stored values.
6218       if (auto *ST = dyn_cast<StoreInst>(&I))
6219         T = ST->getValueOperand()->getType();
6220 
6221       // Ignore loaded pointer types and stored pointer types that are not
6222       // vectorizable.
6223       //
6224       // FIXME: The check here attempts to predict whether a load or store will
6225       //        be vectorized. We only know this for certain after a VF has
6226       //        been selected. Here, we assume that if an access can be
6227       //        vectorized, it will be. We should also look at extending this
6228       //        optimization to non-pointer types.
6229       //
6230       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
6231           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
6232         continue;
6233 
6234       MinWidth = std::min(MinWidth,
6235                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6236       MaxWidth = std::max(MaxWidth,
6237                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
6238     }
6239   }
6240 
6241   return {MinWidth, MaxWidth};
6242 }
6243 
6244 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
6245                                                            unsigned LoopCost) {
6246   // -- The interleave heuristics --
6247   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6248   // There are many micro-architectural considerations that we can't predict
6249   // at this level. For example, frontend pressure (on decode or fetch) due to
6250   // code size, or the number and capabilities of the execution ports.
6251   //
6252   // We use the following heuristics to select the interleave count:
6253   // 1. If the code has reductions, then we interleave to break the cross
6254   // iteration dependency.
6255   // 2. If the loop is really small, then we interleave to reduce the loop
6256   // overhead.
6257   // 3. We don't interleave if we think that we will spill registers to memory
6258   // due to the increased register pressure.
6259 
6260   if (!isScalarEpilogueAllowed())
6261     return 1;
6262 
  // A finite maximum safe dependence distance was already used to limit the
  // VF; do not interleave, as that would widen the range of iterations
  // executed together beyond the safe distance.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
6265     return 1;
6266 
6267   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6268   const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
  // because under those conditions interleaving can expose ILP and break
  // cross-iteration dependences for reductions.
6274   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6275       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6276     return 1;
6277 
6278   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these counts below, so make sure each count is at least one
  // to avoid dividing by zero.
  for (auto &pair : R.MaxLocalUsers)
    pair.second = std::max(pair.second, 1U);
6284 
  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that are
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want a power-of-two interleave count to simplify any
  // addressing operations or alignment considerations.
  // We also want power-of-two interleave counts to ensure that the induction
  // variable of the vector loop wraps to zero when the tail is folded by
  // masking; this currently happens when optimizing for size, in which case
  // we have already returned 1 above.
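  // For example (illustrative): with 32 registers in a class, 2 of them used
  // by loop-invariant values and at most 6 values live at once, the estimate
  // is PowerOf2Floor((32 - 2) / 6) = PowerOf2Floor(5) = 4 interleaved
  // instances.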
6296   unsigned IC = UINT_MAX;
6297 
  for (auto &pair : R.MaxLocalUsers) {
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
6303     if (VF.isScalar()) {
6304       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6305         TargetNumRegisters = ForceTargetNumScalarRegs;
6306     } else {
6307       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6308         TargetNumRegisters = ForceTargetNumVectorRegs;
6309     }
6310     unsigned MaxLocalUsers = pair.second;
6311     unsigned LoopInvariantRegs = 0;
6312     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6313       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6314 
    unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) /
                                   MaxLocalUsers);
6316     // Don't count the induction variable as interleaved.
6317     if (EnableIndVarRegisterHeur) {
6318       TmpIC =
6319           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6320                         std::max(1U, (MaxLocalUsers - 1)));
6321     }
6322 
6323     IC = std::min(IC, TmpIC);
6324   }
6325 
6326   // Clamp the interleave ranges to reasonable counts.
6327   unsigned MaxInterleaveCount =
6328       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6329 
6330   // Check if the user has overridden the max.
6331   if (VF.isScalar()) {
6332     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6333       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6334   } else {
6335     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6336       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6337   }
6338 
  // If the trip count is a known or estimated compile-time constant, limit the
  // interleave count to at most the trip count divided by VF, provided the
  // result is at least 1.
  //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second
  // vector iteration is enabled. However, for larger loops, there is likely to
  // be a similar benefit as for fixed-width vectors. For now, we choose to
  // leave the InterleaveCount as if vscale is '1', although if some
  // information about the vector is known (e.g. min vector size), we can make
  // a better decision.
6349   if (BestKnownTC) {
6350     MaxInterleaveCount =
6351         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6352     // Make sure MaxInterleaveCount is greater than 0.
6353     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6354   }
6355 
6356   assert(MaxInterleaveCount > 0 &&
6357          "Maximum interleave count must be greater than 0");
6358 
  // Clamp the calculated IC to be between 1 and the max interleave count that
  // the target and trip count allow.
6361   if (IC > MaxInterleaveCount)
6362     IC = MaxInterleaveCount;
6363   else
6364     // Make sure IC is greater than 0.
6365     IC = std::max(1u, IC);
6366 
6367   assert(IC > 0 && "Interleave count must be greater than 0.");
6368 
  // If we did not calculate the cost for VF (because the user selected the
  // VF), then we calculate the cost of VF here.
6371   if (LoopCost == 0) {
6372     assert(expectedCost(VF).first.isValid() && "Expected a valid cost");
6373     LoopCost = *expectedCost(VF).first.getValue();
6374   }
6375 
6376   assert(LoopCost && "Non-zero loop cost expected");
6377 
6378   // Interleave if we vectorized this loop and there is a reduction that could
6379   // benefit from interleaving.
6380   if (VF.isVector() && HasReductions) {
6381     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6382     return IC;
6383   }
6384 
6385   // Note that if we've already vectorized the loop we will have done the
6386   // runtime check and so interleaving won't require further checks.
6387   bool InterleavingRequiresRuntimePointerCheck =
6388       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6389 
6390   // We want to interleave small loops in order to reduce the loop overhead and
6391   // potentially expose ILP opportunities.
6392   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6393                     << "LV: IC is " << IC << '\n'
6394                     << "LV: VF is " << VF << '\n');
6395   const bool AggressivelyInterleaveReductions =
6396       TTI.enableAggressiveInterleaving(HasReductions);
6397   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the loop overhead per iteration costs 1, and use the cost
    // model's estimate of the loop body cost to interleave until the loop
    // overhead is about 5% of the total cost of the loop.
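    // For example (illustrative): with SmallLoopCost = 20 and LoopCost = 3,
    // SmallIC is capped at PowerOf2Floor(20 / 3) = PowerOf2Floor(6) = 4.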
6401     unsigned SmallIC =
6402         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
6403 
6404     // Interleave until store/load ports (estimated by max interleave count) are
6405     // saturated.
6406     unsigned NumStores = Legal->getNumStores();
6407     unsigned NumLoads = Legal->getNumLoads();
6408     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6409     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
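    // For example (illustrative): IC = 8 with two stores and one load gives
    // StoresIC = 8 / 2 = 4 and LoadsIC = 8 / 1 = 8.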
6410 
    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit this, by default, to 2
    // so that the critical path only gets increased by one reduction
    // operation.
6415     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6416       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6417       SmallIC = std::min(SmallIC, F);
6418       StoresIC = std::min(StoresIC, F);
6419       LoadsIC = std::min(LoadsIC, F);
6420     }
6421 
6422     if (EnableLoadStoreRuntimeInterleave &&
6423         std::max(StoresIC, LoadsIC) > SmallIC) {
6424       LLVM_DEBUG(
6425           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6426       return std::max(StoresIC, LoadsIC);
6427     }
6428 
6429     // If there are scalar reductions and TTI has enabled aggressive
6430     // interleaving for reductions, we will interleave to expose ILP.
6431     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6432         AggressivelyInterleaveReductions) {
6433       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave at least SmallIC, but not as aggressively as the normal IC,
      // to handle the rare case where resources are too limited.
6436       return std::max(IC / 2, SmallIC);
6437     } else {
6438       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6439       return SmallIC;
6440     }
6441   }
6442 
6443   // Interleave if this is a large loop (small loops are already dealt with by
6444   // this point) that could benefit from interleaving.
6445   if (AggressivelyInterleaveReductions) {
6446     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6447     return IC;
6448   }
6449 
6450   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6451   return 1;
6452 }
6453 
6454 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6455 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is a
  // very rough estimation. We scan the loop in topological order and assign a
  // number to each instruction. We use RPO to ensure that defs are met before
  // their users. We assume that each instruction that has in-loop users starts
  // an interval. We record every time that an in-loop value is used, so we
  // have a list of the first and last occurrences of each instruction. Next,
  // we transpose this data structure into a multi map that holds the list of
  // intervals that *end* at a specific location. This multi map allows us to
  // perform a linear search. We scan the instructions linearly and record each
  // time that a new interval starts, by placing it in a set. If we find this
  // value in the multi-map then we remove it from the set. The max register
  // usage is the maximum size of the set. We also search for instructions
  // that are defined outside the loop, but are used inside the loop. We need
  // this number separately from the max-interval usage number because, when
  // we unroll, loop-invariant values do not take more registers.
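  // For example (illustrative): given the straight-line sequence
  //   %a = load i32, i32* %p
  //   %b = add i32 %a, 1
  //   %c = mul i32 %b, %b
  // the interval for %a runs from its definition to its last use in %b, and
  // the interval for %b runs from its definition to its last use in %c.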
6473   LoopBlocksDFS DFS(TheLoop);
6474   DFS.perform(LI);
6475 
6476   RegisterUsage RU;
6477 
6478   // Each 'key' in the map opens a new interval. The values
6479   // of the map are the index of the 'last seen' usage of the
6480   // instruction that is the key.
6481   using IntervalMap = DenseMap<Instruction *, unsigned>;
6482 
6483   // Maps instruction to its index.
6484   SmallVector<Instruction *, 64> IdxToInstr;
6485   // Marks the end of each interval.
6486   IntervalMap EndPoint;
  // Saves the set of instructions that are used within the loop.
6488   SmallPtrSet<Instruction *, 8> Ends;
6489   // Saves the list of values that are used in the loop but are
6490   // defined outside the loop, such as arguments and constants.
6491   SmallPtrSet<Value *, 8> LoopInvariants;
6492 
6493   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6494     for (Instruction &I : BB->instructionsWithoutDebug()) {
6495       IdxToInstr.push_back(&I);
6496 
6497       // Save the end location of each USE.
6498       for (Value *U : I.operands()) {
6499         auto *Instr = dyn_cast<Instruction>(U);
6500 
6501         // Ignore non-instruction values such as arguments, constants, etc.
6502         if (!Instr)
6503           continue;
6504 
6505         // If this instruction is outside the loop then record it and continue.
6506         if (!TheLoop->contains(Instr)) {
6507           LoopInvariants.insert(Instr);
6508           continue;
6509         }
6510 
6511         // Overwrite previous end points.
6512         EndPoint[Instr] = IdxToInstr.size();
6513         Ends.insert(Instr);
6514       }
6515     }
6516   }
6517 
6518   // Saves the list of intervals that end with the index in 'key'.
6519   using InstrList = SmallVector<Instruction *, 2>;
6520   DenseMap<unsigned, InstrList> TransposeEnds;
6521 
6522   // Transpose the EndPoints to a list of values that end at each index.
6523   for (auto &Interval : EndPoint)
6524     TransposeEnds[Interval.second].push_back(Interval.first);
6525 
6526   SmallPtrSet<Instruction *, 8> OpenIntervals;
6527   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6528   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6529 
6530   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6531 
6532   // A lambda that gets the register usage for the given type and VF.
6533   const auto &TTICapture = TTI;
6534   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) {
6535     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6536       return 0U;
6537     return TTICapture.getRegUsageForType(VectorType::get(Ty, VF));
6538   };
6539 
  for (unsigned i = 0, s = IdxToInstr.size(); i < s; ++i) {
6541     Instruction *I = IdxToInstr[i];
6542 
6543     // Remove all of the instructions that end at this location.
6544     InstrList &List = TransposeEnds[i];
6545     for (Instruction *ToRemove : List)
6546       OpenIntervals.erase(ToRemove);
6547 
6548     // Ignore instructions that are never used within the loop.
6549     if (!Ends.count(I))
6550       continue;
6551 
6552     // Skip ignored values.
6553     if (ValuesToIgnore.count(I))
6554       continue;
6555 
6556     // For each VF find the maximum usage of registers.
6557     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6558       // Count the number of live intervals.
6559       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6560 
6561       if (VFs[j].isScalar()) {
6562         for (auto Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
6564           if (RegUsage.find(ClassID) == RegUsage.end())
6565             RegUsage[ClassID] = 1;
6566           else
6567             RegUsage[ClassID] += 1;
6568         }
6569       } else {
6570         collectUniformsAndScalars(VFs[j]);
6571         for (auto Inst : OpenIntervals) {
6572           // Skip ignored values for VF > 1.
6573           if (VecValuesToIgnore.count(Inst))
6574             continue;
6575           if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
6577             if (RegUsage.find(ClassID) == RegUsage.end())
6578               RegUsage[ClassID] = 1;
6579             else
6580               RegUsage[ClassID] += 1;
6581           } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
6583             if (RegUsage.find(ClassID) == RegUsage.end())
6584               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
6585             else
6586               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
6587           }
6588         }
6589       }
6590 
      for (auto &pair : RegUsage) {
        if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
          MaxUsages[j][pair.first] =
              std::max(MaxUsages[j][pair.first], pair.second);
6594         else
6595           MaxUsages[j][pair.first] = pair.second;
6596       }
6597     }
6598 
6599     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6600                       << OpenIntervals.size() << '\n');
6601 
6602     // Add the current instruction to the list of open intervals.
6603     OpenIntervals.insert(I);
6604   }
6605 
6606   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6607     SmallMapVector<unsigned, unsigned, 4> Invariant;
6608 
6609     for (auto Inst : LoopInvariants) {
6610       unsigned Usage =
6611           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6612       unsigned ClassID =
6613           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
6614       if (Invariant.find(ClassID) == Invariant.end())
6615         Invariant[ClassID] = Usage;
6616       else
6617         Invariant[ClassID] += Usage;
6618     }
6619 
6620     LLVM_DEBUG({
6621       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6622       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6623              << " item\n";
6624       for (const auto &pair : MaxUsages[i]) {
6625         dbgs() << "LV(REG): RegisterClass: "
6626                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6627                << " registers\n";
6628       }
6629       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6630              << " item\n";
6631       for (const auto &pair : Invariant) {
6632         dbgs() << "LV(REG): RegisterClass: "
6633                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6634                << " registers\n";
6635       }
6636     });
6637 
6638     RU.LoopInvariantRegs = Invariant;
6639     RU.MaxLocalUsers = MaxUsages[i];
6640     RUs[i] = RU;
6641   }
6642 
6643   return RUs;
6644 }
6645 
bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
  // TODO: The cost model for emulated masked load/store is completely broken.
  // This hack guides the cost model to use an artificially high enough value
  // to practically disable vectorization with such operations, except where
  // the previously deployed legality hack allowed using very low cost values.
  // This is to avoid regressions coming simply from moving the "masked
  // load/store" check from legality to the cost model.
  // Masked load/gather emulation was previously never allowed.
  // Only a limited amount of masked store/scatter emulation was allowed.
6655   assert(isPredicatedInst(I, ElementCount::getFixed(1)) &&
6656          "Expecting a scalar emulated instruction");
6657   return isa<LoadInst>(I) ||
6658          (isa<StoreInst>(I) &&
6659           NumPredStores > NumberOfStoresToPredicate);
6660 }
6661 
6662 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6663   // If we aren't vectorizing the loop, or if we've already collected the
6664   // instructions to scalarize, there's nothing to do. Collection may already
6665   // have occurred if we have a user-selected VF and are now computing the
6666   // expected cost for interleaving.
6667   if (VF.isScalar() || VF.isZero() ||
6668       InstsToScalarize.find(VF) != InstsToScalarize.end())
6669     return;
6670 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6672   // not profitable to scalarize any instructions, the presence of VF in the
6673   // map will indicate that we've analyzed it already.
6674   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6675 
6676   // Find all the instructions that are scalar with predication in the loop and
6677   // determine if it would be better to not if-convert the blocks they are in.
6678   // If so, we also record the instructions to scalarize.
6679   for (BasicBlock *BB : TheLoop->blocks()) {
6680     if (!blockNeedsPredication(BB))
6681       continue;
6682     for (Instruction &I : *BB)
6683       if (isScalarWithPredication(&I)) {
6684         ScalarCostsTy ScalarCosts;
        // Do not apply discount logic if the hacked cost is needed for
        // emulated masked memrefs.
6687         if (!useEmulatedMaskMemRefHack(&I) &&
6688             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6689           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6690         // Remember that BB will remain after vectorization.
6691         PredicatedBBsAfterVectorization.insert(BB);
6692       }
6693   }
6694 }
6695 
6696 int LoopVectorizationCostModel::computePredInstDiscount(
6697     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6698   assert(!isUniformAfterVectorization(PredInst, VF) &&
6699          "Instruction marked uniform-after-vectorization will be predicated");
6700 
6701   // Initialize the discount to zero, meaning that the scalar version and the
6702   // vector version cost the same.
6703   InstructionCost Discount = 0;
6704 
6705   // Holds instructions to analyze. The instructions we visit are mapped in
6706   // ScalarCosts. Those instructions are the ones that would be scalarized if
6707   // we find that the scalar version costs less.
6708   SmallVector<Instruction *, 8> Worklist;
6709 
6710   // Returns true if the given instruction can be scalarized.
6711   auto canBeScalarized = [&](Instruction *I) -> bool {
6712     // We only attempt to scalarize instructions forming a single-use chain
6713     // from the original predicated block that would otherwise be vectorized.
6714     // Although not strictly necessary, we give up on instructions we know will
6715     // already be scalar to avoid traversing chains that are unlikely to be
6716     // beneficial.
6717     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6718         isScalarAfterVectorization(I, VF))
6719       return false;
6720 
6721     // If the instruction is scalar with predication, it will be analyzed
6722     // separately. We ignore it within the context of PredInst.
6723     if (isScalarWithPredication(I))
6724       return false;
6725 
6726     // If any of the instruction's operands are uniform after vectorization,
6727     // the instruction cannot be scalarized. This prevents, for example, a
6728     // masked load from being scalarized.
6729     //
6730     // We assume we will only emit a value for lane zero of an instruction
6731     // marked uniform after vectorization, rather than VF identical values.
6732     // Thus, if we scalarize an instruction that uses a uniform, we would
6733     // create uses of values corresponding to the lanes we aren't emitting code
6734     // for. This behavior can be changed by allowing getScalarValue to clone
6735     // the lane zero values for uniforms rather than asserting.
6736     for (Use &U : I->operands())
6737       if (auto *J = dyn_cast<Instruction>(U.get()))
6738         if (isUniformAfterVectorization(J, VF))
6739           return false;
6740 
6741     // Otherwise, we can scalarize the instruction.
6742     return true;
6743   };
6744 
6745   // Compute the expected cost discount from scalarizing the entire expression
6746   // feeding the predicated instruction. We currently only consider expressions
6747   // that are single-use instruction chains.
6748   Worklist.push_back(PredInst);
6749   while (!Worklist.empty()) {
6750     Instruction *I = Worklist.pop_back_val();
6751 
6752     // If we've already analyzed the instruction, there's nothing to do.
6753     if (ScalarCosts.find(I) != ScalarCosts.end())
6754       continue;
6755 
6756     // Compute the cost of the vector instruction. Note that this cost already
6757     // includes the scalarization overhead of the predicated instruction.
6758     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6759 
6760     // Compute the cost of the scalarized instruction. This cost is the cost of
6761     // the instruction as if it wasn't if-converted and instead remained in the
6762     // predicated block. We will scale this cost by block probability after
6763     // computing the scalarization overhead.
6764     assert(!VF.isScalable() && "scalable vectors not yet supported.");
6765     InstructionCost ScalarCost =
6766         VF.getKnownMinValue() *
6767         getInstructionCost(I, ElementCount::getFixed(1)).first;
6768 
6769     // Compute the scalarization overhead of needed insertelement instructions
6770     // and phi nodes.
6771     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6772       ScalarCost += TTI.getScalarizationOverhead(
6773           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6774           APInt::getAllOnesValue(VF.getKnownMinValue()), true, false);
6775       assert(!VF.isScalable() && "scalable vectors not yet supported.");
6776       ScalarCost +=
6777           VF.getKnownMinValue() *
6778           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6779     }
6780 
6781     // Compute the scalarization overhead of needed extractelement
6782     // instructions. For each of the instruction's operands, if the operand can
6783     // be scalarized, add it to the worklist; otherwise, account for the
6784     // overhead.
6785     for (Use &U : I->operands())
6786       if (auto *J = dyn_cast<Instruction>(U.get())) {
6787         assert(VectorType::isValidElementType(J->getType()) &&
6788                "Instruction has non-scalar type");
6789         if (canBeScalarized(J))
6790           Worklist.push_back(J);
6791         else if (needsExtract(J, VF)) {
6792           assert(!VF.isScalable() && "scalable vectors not yet supported.");
6793           ScalarCost += TTI.getScalarizationOverhead(
6794               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6795               APInt::getAllOnesValue(VF.getKnownMinValue()), false, true);
6796         }
6797       }
6798 
6799     // Scale the total scalar cost by block probability.
6800     ScalarCost /= getReciprocalPredBlockProb();
6801 
6802     // Compute the discount. A non-negative discount means the vector version
6803     // of the instruction costs more, and scalarizing would be beneficial.
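    // For example (illustrative): VectorCost = 10 and ScalarCost = 6 add a
    // discount of 4 for this instruction, making scalarization look better.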
6804     Discount += VectorCost - ScalarCost;
6805     ScalarCosts[I] = ScalarCost;
6806   }
6807 
6808   return *Discount.getValue();
6809 }
6810 
6811 LoopVectorizationCostModel::VectorizationCostTy
6812 LoopVectorizationCostModel::expectedCost(ElementCount VF) {
6813   VectorizationCostTy Cost;
6814 
6815   // For each block.
6816   for (BasicBlock *BB : TheLoop->blocks()) {
6817     VectorizationCostTy BlockCost;
6818 
6819     // For each instruction in the old loop.
6820     for (Instruction &I : BB->instructionsWithoutDebug()) {
6821       // Skip ignored values.
6822       if (ValuesToIgnore.count(&I) ||
6823           (VF.isVector() && VecValuesToIgnore.count(&I)))
6824         continue;
6825 
6826       VectorizationCostTy C = getInstructionCost(&I, VF);
6827 
6828       // Check if we should override the cost.
6829       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
6830         C.first = InstructionCost(ForceTargetInstructionCost);
6831 
6832       BlockCost.first += C.first;
6833       BlockCost.second |= C.second;
6834       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6835                         << " for VF " << VF << " For instruction: " << I
6836                         << '\n');
6837     }
6838 
6839     // If we are vectorizing a predicated block, it will have been
6840     // if-converted. This means that the block's instructions (aside from
6841     // stores and instructions that may divide by zero) will now be
6842     // unconditionally executed. For the scalar case, we may not always execute
6843     // the predicated block, if it is an if-else block. Thus, scale the block's
6844     // cost by the probability of executing it. blockNeedsPredication from
6845     // Legal is used so as to not include all blocks in tail folded loops.
6846     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6847       BlockCost.first /= getReciprocalPredBlockProb();
6848 
6849     Cost.first += BlockCost.first;
6850     Cost.second |= BlockCost.second;
6851   }
6852 
6853   return Cost;
6854 }
6855 
/// Gets the address access SCEV after verifying that the access pattern is
/// loop-invariant except for the induction variable dependence.
6858 ///
6859 /// This SCEV can be sent to the Target in order to estimate the address
6860 /// calculation cost.
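///
/// For example (illustrative): for
///   %gep = getelementptr inbounds i32, i32* %base, i64 %iv
/// where %base is loop-invariant and %iv is the canonical induction variable,
/// the returned SCEV is an add recurrence such as {%base,+,4}<%loop>.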
static const SCEV *getAddressAccessSCEV(Value *Ptr,
                                        LoopVectorizationLegality *Legal,
                                        PredicatedScalarEvolution &PSE,
                                        const Loop *TheLoop) {
6867   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6868   if (!Gep)
6869     return nullptr;
6870 
6871   // We are looking for a gep with all loop invariant indices except for one
6872   // which should be an induction variable.
6873   auto SE = PSE.getSE();
6874   unsigned NumOperands = Gep->getNumOperands();
6875   for (unsigned i = 1; i < NumOperands; ++i) {
6876     Value *Opd = Gep->getOperand(i);
6877     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6878         !Legal->isInductionVariable(Opd))
6879       return nullptr;
6880   }
6881 
  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
6883   return PSE.getSCEV(Ptr);
6884 }
6885 
6886 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6887   return Legal->hasStride(I->getOperand(0)) ||
6888          Legal->hasStride(I->getOperand(1));
6889 }
6890 
6891 InstructionCost
6892 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6893                                                         ElementCount VF) {
6894   assert(VF.isVector() &&
6895          "Scalarization cost of instruction implies vectorization.");
6896   if (VF.isScalable())
6897     return InstructionCost::getInvalid();
6898 
6899   Type *ValTy = getMemInstValueType(I);
6900   auto SE = PSE.getSE();
6901 
6902   unsigned AS = getLoadStoreAddressSpace(I);
6903   Value *Ptr = getLoadStorePointerOperand(I);
6904   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6905 
  // Figure out whether the access is strided, and get the stride value if it
  // is known at compile time.
6908   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6909 
6910   // Get the cost of the scalar memory instruction and address computation.
6911   InstructionCost Cost =
6912       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6913 
6914   // Don't pass *I here, since it is scalar but will actually be part of a
6915   // vectorized loop where the user of it is a vectorized instruction.
6916   const Align Alignment = getLoadStoreAlignment(I);
6917   Cost += VF.getKnownMinValue() *
6918           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6919                               AS, TTI::TCK_RecipThroughput);
6920 
6921   // Get the overhead of the extractelement and insertelement instructions
6922   // we might create due to scalarization.
6923   Cost += getScalarizationOverhead(I, VF);
6924 
6925   // If we have a predicated load/store, it will need extra i1 extracts and
6926   // conditional branches, but may not be executed for each vector lane. Scale
6927   // the cost by the probability of executing the predicated block.
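  // For example (illustrative): assuming the usual 50% block-execution
  // estimate, the scalarized cost is halved here before the i1 extract and
  // branch overhead is added below.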
6928   if (isPredicatedInst(I, ElementCount::getFixed(1))) {
6929     Cost /= getReciprocalPredBlockProb();
6930 
6931     // Add the cost of an i1 extract and a branch
6932     auto *Vec_i1Ty =
6933         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
6934     Cost += TTI.getScalarizationOverhead(
6935         Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
6936         /*Insert=*/false, /*Extract=*/true);
6937     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
6938 
6939     if (useEmulatedMaskMemRefHack(I))
6940       // Artificially setting to a high enough value to practically disable
6941       // vectorization with such operations.
6942       Cost = 3000000;
6943   }
6944 
6945   return Cost;
6946 }
6947 
6948 InstructionCost
6949 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6950                                                     ElementCount VF) {
6951   Type *ValTy = getMemInstValueType(I);
6952   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6953   Value *Ptr = getLoadStorePointerOperand(I);
6954   unsigned AS = getLoadStoreAddressSpace(I);
6955   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
6956   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6957 
6958   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6959          "Stride should be 1 or -1 for consecutive memory access");
6960   const Align Alignment = getLoadStoreAlignment(I);
6961   InstructionCost Cost = 0;
6962   if (Legal->isMaskRequired(I))
6963     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6964                                       CostKind);
6965   else
6966     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6967                                 CostKind, I);
6968 
6969   bool Reverse = ConsecutiveStride < 0;
6970   if (Reverse)
6971     Cost +=
6972         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6973   return Cost;
6974 }
6975 
6976 InstructionCost
6977 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6978                                                 ElementCount VF) {
6979   assert(Legal->isUniformMemOp(*I));
6980 
6981   Type *ValTy = getMemInstValueType(I);
6982   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6983   const Align Alignment = getLoadStoreAlignment(I);
6984   unsigned AS = getLoadStoreAddressSpace(I);
6985   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6986   if (isa<LoadInst>(I)) {
6987     return TTI.getAddressComputationCost(ValTy) +
6988            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6989                                CostKind) +
6990            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6991   }
6992   StoreInst *SI = cast<StoreInst>(I);
6993 
6994   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
6995   return TTI.getAddressComputationCost(ValTy) +
6996          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6997                              CostKind) +
6998          (isLoopInvariantStoreValue
6999               ? 0
7000               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
7001                                        VF.getKnownMinValue() - 1));
7002 }
7003 
7004 InstructionCost
7005 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
7006                                                  ElementCount VF) {
7007   Type *ValTy = getMemInstValueType(I);
7008   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7009   const Align Alignment = getLoadStoreAlignment(I);
7010   const Value *Ptr = getLoadStorePointerOperand(I);
7011 
7012   return TTI.getAddressComputationCost(VectorTy) +
7013          TTI.getGatherScatterOpCost(
7014              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
7015              TargetTransformInfo::TCK_RecipThroughput, I);
7016 }
7017 
7018 InstructionCost
7019 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
7020                                                    ElementCount VF) {
7021   // TODO: Once we have support for interleaving with scalable vectors
7022   // we can calculate the cost properly here.
7023   if (VF.isScalable())
7024     return InstructionCost::getInvalid();
7025 
7026   Type *ValTy = getMemInstValueType(I);
7027   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
7028   unsigned AS = getLoadStoreAddressSpace(I);
7029 
7030   auto Group = getInterleavedAccessGroup(I);
7031   assert(Group && "Fail to get an interleaved access group.");
7032 
7033   unsigned InterleaveFactor = Group->getFactor();
7034   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
7035 
7036   // Holds the indices of existing members in an interleaved load group.
7037   // An interleaved store group doesn't need this as it doesn't allow gaps.
7038   SmallVector<unsigned, 4> Indices;
7039   if (isa<LoadInst>(I)) {
7040     for (unsigned i = 0; i < InterleaveFactor; i++)
7041       if (Group->getMember(i))
7042         Indices.push_back(i);
7043   }
7044 
7045   // Calculate the cost of the whole interleaved group.
7046   bool UseMaskForGaps =
7047       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
7048   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
7049       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
7050       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
7051 
7052   if (Group->isReverse()) {
7053     // TODO: Add support for reversed masked interleaved access.
7054     assert(!Legal->isMaskRequired(I) &&
7055            "Reverse masked interleaved access not supported.");
7056     Cost +=
7057         Group->getNumMembers() *
7058         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
7059   }
7060   return Cost;
7061 }
7062 
7063 InstructionCost LoopVectorizationCostModel::getReductionPatternCost(
7064     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
  // Early exit if there are no in-loop reductions.
7066   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
7067     return InstructionCost::getInvalid();
7068   auto *VectorTy = cast<VectorType>(Ty);
7069 
  // We are looking for one of the following patterns, and for the minimal
  // acceptable cost:
  //  reduce(mul(ext(A), ext(B))) or
  //  reduce(mul(A, B)) or
  //  reduce(ext(A)) or
  //  reduce(A).
  // The basic idea is that we walk down the tree to do that, finding the root
  // reduction instruction in InLoopReductionImmediateChains. From there we find
  // the pattern of mul/ext and test the cost of the entire pattern vs the cost
  // of the components. If the reduction cost is lower then we return it for the
  // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return an invalid cost specifying that the original cost
  // method should be used.
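  // For example (illustrative IR for reduce(mul(ext(A), ext(B)))):
  //   %a = sext i8 %x to i32
  //   %b = sext i8 %y to i32
  //   %m = mul i32 %a, %b
  //   %r = add i32 %m, %phi   ; the in-loop reduction add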
7082   Instruction *RetI = I;
7083   if ((RetI->getOpcode() == Instruction::SExt ||
7084        RetI->getOpcode() == Instruction::ZExt)) {
7085     if (!RetI->hasOneUser())
7086       return InstructionCost::getInvalid();
7087     RetI = RetI->user_back();
7088   }
7089   if (RetI->getOpcode() == Instruction::Mul &&
7090       RetI->user_back()->getOpcode() == Instruction::Add) {
7091     if (!RetI->hasOneUser())
7092       return InstructionCost::getInvalid();
7093     RetI = RetI->user_back();
7094   }
7095 
  // Test if the found instruction is a reduction, and if not return an invalid
  // cost telling the caller to use the original cost modelling.
7098   if (!InLoopReductionImmediateChains.count(RetI))
7099     return InstructionCost::getInvalid();
7100 
7101   // Find the reduction this chain is a part of and calculate the basic cost of
7102   // the reduction on its own.
7103   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
7104   Instruction *ReductionPhi = LastChain;
7105   while (!isa<PHINode>(ReductionPhi))
7106     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
7107 
7108   RecurrenceDescriptor RdxDesc =
7109       Legal->getReductionVars()[cast<PHINode>(ReductionPhi)];
7110   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
7111       RdxDesc.getOpcode(), VectorTy, false, CostKind);
7112 
7113   // Get the operand that was not the reduction chain and match it to one of the
7114   // patterns, returning the better cost if it is found.
7115   Instruction *RedOp = RetI->getOperand(1) == LastChain
7116                            ? dyn_cast<Instruction>(RetI->getOperand(0))
7117                            : dyn_cast<Instruction>(RetI->getOperand(1));
7118 
7119   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
7120 
7121   if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) &&
7122       !TheLoop->isLoopInvariant(RedOp)) {
7123     bool IsUnsigned = isa<ZExtInst>(RedOp);
7124     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
7125     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7126         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7127         CostKind);
7128 
7129     InstructionCost ExtCost =
7130         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
7131                              TTI::CastContextHint::None, CostKind, RedOp);
7132     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
7133       return I == RetI ? *RedCost.getValue() : 0;
7134   } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) {
7135     Instruction *Mul = RedOp;
7136     Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0));
7137     Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1));
7138     if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) &&
7139         Op0->getOpcode() == Op1->getOpcode() &&
7140         Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
7141         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
7142       bool IsUnsigned = isa<ZExtInst>(Op0);
7143       auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
7144       // reduce(mul(ext, ext))
7145       InstructionCost ExtCost =
7146           TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType,
7147                                TTI::CastContextHint::None, CostKind, Op0);
7148       InstructionCost MulCost =
7149           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
7150 
7151       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7152           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7153           CostKind);
7154 
7155       if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost)
7156         return I == RetI ? *RedCost.getValue() : 0;
7157     } else {
7158       InstructionCost MulCost =
7159           TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind);
7160 
7161       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7162           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
7163           CostKind);
7164 
7165       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
7166         return I == RetI ? *RedCost.getValue() : 0;
7167     }
7168   }
7169 
7170   return I == RetI ? BaseCost : InstructionCost::getInvalid();
7171 }
7172 
7173 InstructionCost
7174 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7175                                                      ElementCount VF) {
  // Calculate the scalar cost only. The vectorization cost should already
  // have been computed at this point.
7178   if (VF.isScalar()) {
7179     Type *ValTy = getMemInstValueType(I);
7180     const Align Alignment = getLoadStoreAlignment(I);
7181     unsigned AS = getLoadStoreAddressSpace(I);
7182 
7183     return TTI.getAddressComputationCost(ValTy) +
7184            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
7185                                TTI::TCK_RecipThroughput, I);
7186   }
7187   return getWideningCost(I, VF);
7188 }
7189 
7190 LoopVectorizationCostModel::VectorizationCostTy
7191 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7192                                                ElementCount VF) {
7193   // If we know that this instruction will remain uniform, check the cost of
7194   // the scalar version.
7195   if (isUniformAfterVectorization(I, VF))
7196     VF = ElementCount::getFixed(1);
7197 
7198   if (VF.isVector() && isProfitableToScalarize(I, VF))
7199     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7200 
7201   // Forced scalars do not have any scalarization overhead.
7202   auto ForcedScalar = ForcedScalars.find(VF);
7203   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
7204     auto InstSet = ForcedScalar->second;
7205     if (InstSet.count(I))
7206       return VectorizationCostTy(
7207           (getInstructionCost(I, ElementCount::getFixed(1)).first *
7208            VF.getKnownMinValue()),
7209           false);
7210   }
7211 
7212   Type *VectorTy;
7213   InstructionCost C = getInstructionCost(I, VF, VectorTy);
7214 
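  // The type is considered "not scalarized" when the widened vector type
  // legalizes to fewer parts than there are lanes, i.e. the target genuinely
  // packs multiple elements into each register.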
7215   bool TypeNotScalarized =
7216       VF.isVector() && VectorTy->isVectorTy() &&
7217       TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue();
7218   return VectorizationCostTy(C, TypeNotScalarized);
7219 }
7220 
7221 InstructionCost
7222 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
7223                                                      ElementCount VF) const {
7224 
7225   if (VF.isScalable())
7226     return InstructionCost::getInvalid();
7227 
7228   if (VF.isScalar())
7229     return 0;
7230 
7231   InstructionCost Cost = 0;
7232   Type *RetTy = ToVectorTy(I->getType(), VF);
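  // Account for the cost of inserting each scalarized result back into a
  // vector, except for loads on targets that support efficient vector
  // element load/stores.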
7233   if (!RetTy->isVoidTy() &&
7234       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
7235     Cost += TTI.getScalarizationOverhead(
7236         cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()),
7237         true, false);
7238 
7239   // Some targets keep addresses scalar.
7240   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
7241     return Cost;
7242 
7243   // Some targets support efficient element stores.
7244   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
7245     return Cost;
7246 
7247   // Collect operands to consider.
7248   CallInst *CI = dyn_cast<CallInst>(I);
7249   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
7250 
7251   // Skip operands that do not require extraction/scalarization and do not incur
7252   // any overhead.
7253   SmallVector<Type *> Tys;
7254   for (auto *V : filterExtractingOperands(Ops, VF))
7255     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
7256   return Cost + TTI.getOperandsScalarizationOverhead(
7257                     filterExtractingOperands(Ops, VF), Tys);
7258 }
7259 
7260 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
7261   if (VF.isScalar())
7262     return;
7263   NumPredStores = 0;
7264   for (BasicBlock *BB : TheLoop->blocks()) {
7265     // For each instruction in the old loop.
7266     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
7268       if (!Ptr)
7269         continue;
7270 
7271       // TODO: We should generate better code and update the cost model for
7272       // predicated uniform stores. Today they are treated as any other
7273       // predicated store (see added test cases in
7274       // invariant-store-vectorization.ll).
7275       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
7276         NumPredStores++;
7277 
7278       if (Legal->isUniformMemOp(I)) {
7279         // TODO: Avoid replicating loads and stores instead of
7280         // relying on instcombine to remove them.
7281         // Load: Scalar load + broadcast
7282         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
7283         InstructionCost Cost = getUniformMemOpCost(&I, VF);
7284         setWideningDecision(&I, VF, CM_Scalarize, Cost);
7285         continue;
7286       }
7287 
7288       // We assume that widening is the best solution when possible.
7289       if (memoryInstructionCanBeWidened(&I, VF)) {
7290         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
7291         int ConsecutiveStride =
7292                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
7293         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7294                "Expected consecutive stride.");
7295         InstWidening Decision =
7296             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7297         setWideningDecision(&I, VF, Decision, Cost);
7298         continue;
7299       }
7300 
7301       // Choose between Interleaving, Gather/Scatter or Scalarization.
7302       InstructionCost InterleaveCost = InstructionCost::getInvalid();
7303       unsigned NumAccesses = 1;
7304       if (isAccessInterleaved(&I)) {
7305         auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
7307 
7308         // Make one decision for the whole group.
7309         if (getWideningDecision(&I, VF) != CM_Unknown)
7310           continue;
7311 
7312         NumAccesses = Group->getNumMembers();
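        // Scale the gather/scatter and scalarization costs below by the
        // number of group members, since this decision covers the whole
        // interleave group.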
7313         if (interleavedAccessCanBeWidened(&I, VF))
7314           InterleaveCost = getInterleaveGroupCost(&I, VF);
7315       }
7316 
7317       InstructionCost GatherScatterCost =
7318           isLegalGatherOrScatter(&I)
7319               ? getGatherScatterCost(&I, VF) * NumAccesses
7320               : InstructionCost::getInvalid();
7321 
7322       InstructionCost ScalarizationCost =
7323           getMemInstScalarizationCost(&I, VF) * NumAccesses;
7324 
7325       // Choose better solution for the current VF,
7326       // write down this decision and use it during vectorization.
7327       InstructionCost Cost;
7328       InstWidening Decision;
7329       if (InterleaveCost <= GatherScatterCost &&
7330           InterleaveCost < ScalarizationCost) {
7331         Decision = CM_Interleave;
7332         Cost = InterleaveCost;
7333       } else if (GatherScatterCost < ScalarizationCost) {
7334         Decision = CM_GatherScatter;
7335         Cost = GatherScatterCost;
7336       } else {
        assert(!VF.isScalable() &&
               "We cannot yet scalarize for scalable vectors");
7339         Decision = CM_Scalarize;
7340         Cost = ScalarizationCost;
7341       }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The cost is attributed to the whole
      // group, but will actually be assigned to a single instruction.
7345       if (auto Group = getInterleavedAccessGroup(&I))
7346         setWideningDecision(Group, VF, Decision, Cost);
7347       else
7348         setWideningDecision(&I, VF, Decision, Cost);
7349     }
7350   }
7351 
7352   // Make sure that any load of address and any other address computation
7353   // remains scalar unless there is gather/scatter support. This avoids
7354   // inevitable extracts into address registers, and also has the benefit of
7355   // activating LSR more, since that pass can't optimize vectorized
7356   // addresses.
7357   if (TTI.prefersVectorizedAddressing())
7358     return;
7359 
7360   // Start with all scalar pointer uses.
7361   SmallPtrSet<Instruction *, 8> AddrDefs;
7362   for (BasicBlock *BB : TheLoop->blocks())
7363     for (Instruction &I : *BB) {
7364       Instruction *PtrDef =
7365         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7366       if (PtrDef && TheLoop->contains(PtrDef) &&
7367           getWideningDecision(&I, VF) != CM_GatherScatter)
7368         AddrDefs.insert(PtrDef);
7369     }
7370 
7371   // Add all instructions used to generate the addresses.
7372   SmallVector<Instruction *, 4> Worklist;
7373   append_range(Worklist, AddrDefs);
7374   while (!Worklist.empty()) {
7375     Instruction *I = Worklist.pop_back_val();
7376     for (auto &Op : I->operands())
7377       if (auto *InstOp = dyn_cast<Instruction>(Op))
7378         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7379             AddrDefs.insert(InstOp).second)
7380           Worklist.push_back(InstOp);
7381   }
7382 
7383   for (auto *I : AddrDefs) {
7384     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since doing so requires knowing whether the
      // loaded register is involved in an address computation, the decision
      // is instead changed here once we know this is the case.
7389       InstWidening Decision = getWideningDecision(I, VF);
7390       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7391         // Scalarize a widened load of address.
7392         setWideningDecision(
7393             I, VF, CM_Scalarize,
7394             (VF.getKnownMinValue() *
7395              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7396       else if (auto Group = getInterleavedAccessGroup(I)) {
7397         // Scalarize an interleave group of address loads.
7398         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7399           if (Instruction *Member = Group->getMember(I))
7400             setWideningDecision(
7401                 Member, VF, CM_Scalarize,
7402                 (VF.getKnownMinValue() *
7403                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7404         }
7405       }
7406     } else
7407       // Make sure I gets scalarized and a cost estimate without
7408       // scalarization overhead.
7409       ForcedScalars[VF].insert(I);
7410   }
7411 }
7412 
7413 InstructionCost
7414 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7415                                                Type *&VectorTy) {
7416   Type *RetTy = I->getType();
7417   if (canTruncateToMinimalBitwidth(I, VF))
7418     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7419   auto SE = PSE.getSE();
7420   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7421 
7422   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
7423                                                 ElementCount VF) -> bool {
7424     if (VF.isScalar())
7425       return true;
7426 
7427     auto Scalarized = InstsToScalarize.find(VF);
7428     assert(Scalarized != InstsToScalarize.end() &&
7429            "VF not yet analyzed for scalarization profitability");
7430     return !Scalarized->second.count(I) &&
7431            llvm::all_of(I->users(), [&](User *U) {
7432              auto *UI = cast<Instruction>(U);
7433              return !Scalarized->second.count(UI);
7434            });
7435   };
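  // The lambda is referenced only from the assert below; this cast silences
  // unused-variable warnings in builds where asserts are compiled out.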
7436   (void) hasSingleCopyAfterVectorization;
7437 
7438   if (isScalarAfterVectorization(I, VF)) {
7439     // With the exception of GEPs and PHIs, after scalarization there should
7440     // only be one copy of the instruction generated in the loop. This is
7441     // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result,
7443     // it means we don't have to multiply the instruction cost by VF.
7444     assert(I->getOpcode() == Instruction::GetElementPtr ||
7445            I->getOpcode() == Instruction::PHI ||
7446            (I->getOpcode() == Instruction::BitCast &&
7447             I->getType()->isPointerTy()) ||
7448            hasSingleCopyAfterVectorization(I, VF));
7449     VectorTy = RetTy;
7450   } else
7451     VectorTy = ToVectorTy(RetTy, VF);
7452 
7453   // TODO: We need to estimate the cost of intrinsic calls.
7454   switch (I->getOpcode()) {
7455   case Instruction::GetElementPtr:
7456     // We mark this instruction as zero-cost because the cost of GEPs in
7457     // vectorized code depends on whether the corresponding memory instruction
7458     // is scalarized or not. Therefore, we handle GEPs with the memory
7459     // instruction cost.
7460     return 0;
7461   case Instruction::Br: {
7462     // In cases of scalarized and predicated instructions, there will be VF
7463     // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
7465     bool ScalarPredicatedBB = false;
7466     BranchInst *BI = cast<BranchInst>(I);
7467     if (VF.isVector() && BI->isConditional() &&
7468         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7469          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7470       ScalarPredicatedBB = true;
7471 
7472     if (ScalarPredicatedBB) {
7473       // Return cost for branches around scalarized and predicated blocks.
7474       assert(!VF.isScalable() && "scalable vectors not yet supported.");
7475       auto *Vec_i1Ty =
7476           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7477       return (TTI.getScalarizationOverhead(
7478                   Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
7479                   false, true) +
7480               (TTI.getCFInstrCost(Instruction::Br, CostKind) *
7481                VF.getKnownMinValue()));
7482     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7483       // The back-edge branch will remain, as will all scalar branches.
7484       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7485     else
7486       // This branch will be eliminated by if-conversion.
7487       return 0;
7488     // Note: We currently assume zero cost for an unconditional branch inside
7489     // a predicated block since it will become a fall-through, although we
7490     // may decide in the future to call TTI for all branches.
7491   }
7492   case Instruction::PHI: {
7493     auto *Phi = cast<PHINode>(I);
7494 
7495     // First-order recurrences are replaced by vector shuffles inside the loop.
7496     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7497     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7498       return TTI.getShuffleCost(
7499           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7500           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7501 
7502     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7503     // converted into select instructions. We require N - 1 selects per phi
7504     // node, where N is the number of incoming values.
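    // For example, a phi merging three incoming values is lowered to two
    // nested selects.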
7505     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7506       return (Phi->getNumIncomingValues() - 1) *
7507              TTI.getCmpSelInstrCost(
7508                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7509                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7510                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7511 
7512     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7513   }
7514   case Instruction::UDiv:
7515   case Instruction::SDiv:
7516   case Instruction::URem:
7517   case Instruction::SRem:
7518     // If we have a predicated instruction, it may not be executed for each
7519     // vector lane. Get the scalarization cost and scale this amount by the
7520     // probability of executing the predicated block. If the instruction is not
7521     // predicated, we fall through to the next case.
7522     if (VF.isVector() && isScalarWithPredication(I)) {
7523       InstructionCost Cost = 0;
7524 
7525       // These instructions have a non-void type, so account for the phi nodes
7526       // that we will create. This cost is likely to be zero. The phi node
7527       // cost, if any, should be scaled by the block probability because it
7528       // models a copy at the end of each predicated block.
7529       Cost += VF.getKnownMinValue() *
7530               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7531 
7532       // The cost of the non-predicated instruction.
7533       Cost += VF.getKnownMinValue() *
7534               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7535 
7536       // The cost of insertelement and extractelement instructions needed for
7537       // scalarization.
7538       Cost += getScalarizationOverhead(I, VF);
7539 
7540       // Scale the cost by the probability of executing the predicated blocks.
7541       // This assumes the predicated block for each vector lane is equally
7542       // likely.
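      // E.g. if the reciprocal block probability is 2 (predicated blocks
      // assumed to execute half the time), the cost computed above is halved.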
7543       return Cost / getReciprocalPredBlockProb();
7544     }
7545     LLVM_FALLTHROUGH;
7546   case Instruction::Add:
7547   case Instruction::FAdd:
7548   case Instruction::Sub:
7549   case Instruction::FSub:
7550   case Instruction::Mul:
7551   case Instruction::FMul:
7552   case Instruction::FDiv:
7553   case Instruction::FRem:
7554   case Instruction::Shl:
7555   case Instruction::LShr:
7556   case Instruction::AShr:
7557   case Instruction::And:
7558   case Instruction::Or:
7559   case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go away.
7561     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7562       return 0;
7563 
7564     // Detect reduction patterns
7565     InstructionCost RedCost;
7566     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7567             .isValid())
7568       return RedCost;
7569 
7570     // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
7572     Value *Op2 = I->getOperand(1);
7573     TargetTransformInfo::OperandValueProperties Op2VP;
7574     TargetTransformInfo::OperandValueKind Op2VK =
7575         TTI.getOperandInfo(Op2, Op2VP);
7576     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7577       Op2VK = TargetTransformInfo::OK_UniformValue;
7578 
7579     SmallVector<const Value *, 4> Operands(I->operand_values());
7580     return TTI.getArithmeticInstrCost(
7581         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7582         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7583   }
7584   case Instruction::FNeg: {
7585     return TTI.getArithmeticInstrCost(
7586         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7587         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7588         TargetTransformInfo::OP_None, I->getOperand(0), I);
7589   }
7590   case Instruction::Select: {
7591     SelectInst *SI = cast<SelectInst>(I);
7592     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7593     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7594 
7595     const Value *Op0, *Op1;
7596     using namespace llvm::PatternMatch;
7597     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7598                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7599       // select x, y, false --> x & y
7600       // select x, true, y --> x | y
7601       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7602       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7603       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7604       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7605       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7606               Op1->getType()->getScalarSizeInBits() == 1);
7607 
7608       SmallVector<const Value *, 2> Operands{Op0, Op1};
7609       return TTI.getArithmeticInstrCost(
7610           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7611           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7612     }
7613 
7614     Type *CondTy = SI->getCondition()->getType();
7615     if (!ScalarCond)
7616       CondTy = VectorType::get(CondTy, VF);
7617     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
7618                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7619   }
7620   case Instruction::ICmp:
7621   case Instruction::FCmp: {
7622     Type *ValTy = I->getOperand(0)->getType();
7623     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7624     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7625       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7626     VectorTy = ToVectorTy(ValTy, VF);
7627     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7628                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7629   }
7630   case Instruction::Store:
7631   case Instruction::Load: {
7632     ElementCount Width = VF;
7633     if (Width.isVector()) {
7634       InstWidening Decision = getWideningDecision(I, Width);
7635       assert(Decision != CM_Unknown &&
7636              "CM decision should be taken at this point");
7637       if (Decision == CM_Scalarize)
7638         Width = ElementCount::getFixed(1);
7639     }
7640     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
7641     return getMemoryInstructionCost(I, VF);
7642   }
7643   case Instruction::BitCast:
7644     if (I->getType()->isPointerTy())
7645       return 0;
7646     LLVM_FALLTHROUGH;
7647   case Instruction::ZExt:
7648   case Instruction::SExt:
7649   case Instruction::FPToUI:
7650   case Instruction::FPToSI:
7651   case Instruction::FPExt:
7652   case Instruction::PtrToInt:
7653   case Instruction::IntToPtr:
7654   case Instruction::SIToFP:
7655   case Instruction::UIToFP:
7656   case Instruction::Trunc:
7657   case Instruction::FPTrunc: {
7658     // Computes the CastContextHint from a Load/Store instruction.
7659     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7660       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7661              "Expected a load or a store!");
7662 
7663       if (VF.isScalar() || !TheLoop->contains(I))
7664         return TTI::CastContextHint::Normal;
7665 
7666       switch (getWideningDecision(I, VF)) {
7667       case LoopVectorizationCostModel::CM_GatherScatter:
7668         return TTI::CastContextHint::GatherScatter;
7669       case LoopVectorizationCostModel::CM_Interleave:
7670         return TTI::CastContextHint::Interleave;
7671       case LoopVectorizationCostModel::CM_Scalarize:
7672       case LoopVectorizationCostModel::CM_Widen:
7673         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7674                                         : TTI::CastContextHint::Normal;
7675       case LoopVectorizationCostModel::CM_Widen_Reverse:
7676         return TTI::CastContextHint::Reversed;
7677       case LoopVectorizationCostModel::CM_Unknown:
7678         llvm_unreachable("Instr did not go through cost modelling?");
7679       }
7680 
7681       llvm_unreachable("Unhandled case!");
7682     };
7683 
7684     unsigned Opcode = I->getOpcode();
7685     TTI::CastContextHint CCH = TTI::CastContextHint::None;
    // For Trunc, the context is its single user, provided that user is a
    // StoreInst.
7687     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7688       if (I->hasOneUse())
7689         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7690           CCH = ComputeCCH(Store);
7691     }
    // For Z/Sext, the context is its operand, provided that operand is a
    // LoadInst.
7693     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7694              Opcode == Instruction::FPExt) {
7695       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7696         CCH = ComputeCCH(Load);
7697     }
7698 
7699     // We optimize the truncation of induction variables having constant
7700     // integer steps. The cost of these truncations is the same as the scalar
7701     // operation.
7702     if (isOptimizableIVTruncate(I, VF)) {
7703       auto *Trunc = cast<TruncInst>(I);
7704       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7705                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7706     }
7707 
7708     // Detect reduction patterns
7709     InstructionCost RedCost;
7710     if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7711             .isValid())
7712       return RedCost;
7713 
7714     Type *SrcScalarTy = I->getOperand(0)->getType();
7715     Type *SrcVecTy =
7716         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7717     if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or turn it
      // into a slightly different cast. For example, if MinBW == 16,
7720       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7721       //
7722       // Calculate the modified src and dest types.
7723       Type *MinVecTy = VectorTy;
7724       if (Opcode == Instruction::Trunc) {
7725         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7726         VectorTy =
7727             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7728       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7729         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7730         VectorTy =
7731             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7732       }
7733     }
7734 
7735     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7736   }
7737   case Instruction::Call: {
7738     bool NeedToScalarize;
7739     CallInst *CI = cast<CallInst>(I);
7740     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7741     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7742       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7743       return std::min(CallCost, IntrinsicCost);
7744     }
7745     return CallCost;
7746   }
7747   case Instruction::ExtractValue:
7748     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7749   default:
7750     // This opcode is unknown. Assume that it is the same as 'mul'.
7751     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7752   } // end of switch.
7753 }
7754 
7755 char LoopVectorize::ID = 0;
7756 
7757 static const char lv_name[] = "Loop Vectorization";
7758 
7759 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7760 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7761 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7762 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7763 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7764 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7765 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7766 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7767 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7768 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7769 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7770 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7771 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7772 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7773 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7774 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7775 
7776 namespace llvm {
7777 
7778 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7779 
7780 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7781                               bool VectorizeOnlyWhenForced) {
7782   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7783 }
7784 
7785 } // end namespace llvm
7786 
7787 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7788   // Check if the pointer operand of a load or store instruction is
7789   // consecutive.
7790   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7791     return Legal->isConsecutivePtr(Ptr);
7792   return false;
7793 }
7794 
7795 void LoopVectorizationCostModel::collectValuesToIgnore() {
7796   // Ignore ephemeral values.
7797   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7798 
7799   // Ignore type-promoting instructions we identified during reduction
7800   // detection.
7801   for (auto &Reduction : Legal->getReductionVars()) {
7802     RecurrenceDescriptor &RedDes = Reduction.second;
7803     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7804     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7805   }
7806   // Ignore type-casting instructions we identified during induction
7807   // detection.
7808   for (auto &Induction : Legal->getInductionVars()) {
7809     InductionDescriptor &IndDes = Induction.second;
7810     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7811     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7812   }
7813 }
7814 
7815 void LoopVectorizationCostModel::collectInLoopReductions() {
7816   for (auto &Reduction : Legal->getReductionVars()) {
7817     PHINode *Phi = Reduction.first;
7818     RecurrenceDescriptor &RdxDesc = Reduction.second;
7819 
7820     // We don't collect reductions that are type promoted (yet).
7821     if (RdxDesc.getRecurrenceType() != Phi->getType())
7822       continue;
7823 
7824     // If the target would prefer this reduction to happen "in-loop", then we
7825     // want to record it as such.
7826     unsigned Opcode = RdxDesc.getOpcode();
7827     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7828         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7829                                    TargetTransformInfo::ReductionFlags()))
7830       continue;
7831 
7832     // Check that we can correctly put the reductions into the loop, by
7833     // finding the chain of operations that leads from the phi to the loop
7834     // exit value.
7835     SmallVector<Instruction *, 4> ReductionOperations =
7836         RdxDesc.getReductionOpChain(Phi, TheLoop);
7837     bool InLoop = !ReductionOperations.empty();
7838     if (InLoop) {
7839       InLoopReductionChains[Phi] = ReductionOperations;
7840       // Add the elements to InLoopReductionImmediateChains for cost modelling.
7841       Instruction *LastChain = Phi;
7842       for (auto *I : ReductionOperations) {
7843         InLoopReductionImmediateChains[I] = LastChain;
7844         LastChain = I;
7845       }
7846     }
7847     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7848                       << " reduction for phi: " << *Phi << "\n");
7849   }
7850 }
7851 
7852 // TODO: we could return a pair of values that specify the max VF and
7853 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
7855 // doesn't have a cost model that can choose which plan to execute if
7856 // more than one is generated.
7857 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7858                                  LoopVectorizationCostModel &CM) {
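  // E.g. with 128-bit vector registers and a widest scalar type of 32 bits,
  // this returns a VF of 4.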
7859   unsigned WidestType;
7860   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7861   return WidestVectorRegBits / WidestType;
7862 }
7863 
7864 VectorizationFactor
7865 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7866   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7867   ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before we can even evaluate whether vectorization is
  // profitable.
7870   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7871   // the vectorization pipeline.
7872   if (!OrigLoop->isInnermost()) {
7873     // If the user doesn't provide a vectorization factor, determine a
7874     // reasonable one.
7875     if (UserVF.isZero()) {
7876       VF = ElementCount::getFixed(determineVPlanVF(
7877           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7878               .getFixedSize(),
7879           CM));
7880       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7881 
7882       // Make sure we have a VF > 1 for stress testing.
7883       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7884         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7885                           << "overriding computed VF.\n");
7886         VF = ElementCount::getFixed(4);
7887       }
7888     }
7889     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7890     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7891            "VF needs to be a power of two");
7892     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7893                       << "VF " << VF << " to build VPlans.\n");
7894     buildVPlans(VF, VF);
7895 
7896     // For VPlan build stress testing, we bail out after VPlan construction.
7897     if (VPlanBuildStressTest)
7898       return VectorizationFactor::Disabled();
7899 
7900     return {VF, 0 /*Cost*/};
7901   }
7902 
7903   LLVM_DEBUG(
7904       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7905                 "VPlan-native path.\n");
7906   return VectorizationFactor::Disabled();
7907 }
7908 
7909 Optional<VectorizationFactor>
7910 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7911   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7912   Optional<ElementCount> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC);
  if (!MaybeMaxVF) // Cases that should not be vectorized or interleaved.
7914     return None;
7915 
7916   // Invalidate interleave groups if all blocks of loop will be predicated.
7917   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
7918       !useMaskedInterleavedAccesses(*TTI)) {
7919     LLVM_DEBUG(
7920         dbgs()
7921         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7922            "which requires masked-interleaved support.\n");
7923     if (CM.InterleaveInfo.invalidateGroups())
7924       // Invalidating interleave groups also requires invalidating all decisions
7925       // based on them, which includes widening decisions and uniform and scalar
7926       // values.
7927       CM.invalidateCostModelingDecisions();
7928   }
7929 
7930   ElementCount MaxVF = MaybeMaxVF.getValue();
7931   assert(MaxVF.isNonZero() && "MaxVF is zero.");
7932 
7933   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxVF);
7934   if (!UserVF.isZero() &&
7935       (UserVFIsLegal || (UserVF.isScalable() && MaxVF.isScalable()))) {
    // FIXME: MaxVF is temporarily used in place of UserVF for illegal scalable
7937     // VFs here, this should be reverted to only use legal UserVFs once the
7938     // loop below supports scalable VFs.
7939     ElementCount VF = UserVFIsLegal ? UserVF : MaxVF;
7940     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max")
7941                       << " VF " << VF << ".\n");
7942     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7943            "VF needs to be a power of two");
7944     // Collect the instructions (and their associated costs) that will be more
7945     // profitable to scalarize.
7946     CM.selectUserVectorizationFactor(VF);
7947     CM.collectInLoopReductions();
7948     buildVPlansWithVPRecipes(VF, VF);
7949     LLVM_DEBUG(printPlans(dbgs()));
7950     return {{VF, 0}};
7951   }
7952 
7953   assert(!MaxVF.isScalable() &&
7954          "Scalable vectors not yet supported beyond this point");
7955 
7956   for (ElementCount VF = ElementCount::getFixed(1);
7957        ElementCount::isKnownLE(VF, MaxVF); VF *= 2) {
7958     // Collect Uniform and Scalar instructions after vectorization with VF.
7959     CM.collectUniformsAndScalars(VF);
7960 
7961     // Collect the instructions (and their associated costs) that will be more
7962     // profitable to scalarize.
7963     if (VF.isVector())
7964       CM.collectInstsToScalarize(VF);
7965   }
7966 
7967   CM.collectInLoopReductions();
7968 
7969   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxVF);
7970   LLVM_DEBUG(printPlans(dbgs()));
7971   if (MaxVF.isScalar())
7972     return VectorizationFactor::Disabled();
7973 
7974   // Select the optimal vectorization factor.
7975   auto SelectedVF = CM.selectVectorizationFactor(MaxVF);
7976 
7977   // Check if it is profitable to vectorize with runtime checks.
7978   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
7979   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
7980     bool PragmaThresholdReached =
7981         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
7982     bool ThresholdReached =
7983         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
7984     if ((ThresholdReached && !Hints.allowReordering()) ||
7985         PragmaThresholdReached) {
7986       ORE->emit([&]() {
7987         return OptimizationRemarkAnalysisAliasing(
7988                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
7989                    OrigLoop->getHeader())
7990                << "loop not vectorized: cannot prove it is safe to reorder "
7991                   "memory operations";
7992       });
7993       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
7994       Hints.emitRemarkWithHints();
7995       return VectorizationFactor::Disabled();
7996     }
7997   }
7998   return SelectedVF;
7999 }
8000 
8001 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
8002   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
8003                     << '\n');
8004   BestVF = VF;
8005   BestUF = UF;
8006 
8007   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
8008     return !Plan->hasVF(VF);
8009   });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
8011 }
8012 
8013 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
8014                                            DominatorTree *DT) {
8015   // Perform the actual loop transformation.
8016 
8017   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
8018   assert(BestVF.hasValue() && "Vectorization Factor is missing");
8019   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
8020 
8021   VPTransformState State{
8022       *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()};
8023   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
8024   State.TripCount = ILV.getOrCreateTripCount(nullptr);
8025   State.CanonicalIV = ILV.Induction;
8026 
8027   ILV.printDebugTracesAtStart();
8028 
8029   //===------------------------------------------------===//
8030   //
  // Notice: any optimization or new instruction that goes
8032   // into the code below should also be implemented in
8033   // the cost-model.
8034   //
8035   //===------------------------------------------------===//
8036 
8037   // 2. Copy and widen instructions from the old loop into the new loop.
8038   VPlans.front()->execute(&State);
8039 
8040   // 3. Fix the vectorized code: take care of header phi's, live-outs,
8041   //    predication, updating analyses.
8042   ILV.fixVectorizedLoop(State);
8043 
8044   ILV.printDebugTracesAtEnd();
8045 }
8046 
8047 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
8048 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
8049   for (const auto &Plan : VPlans)
8050     if (PrintVPlansInDotFormat)
8051       Plan->printDOT(O);
8052     else
8053       Plan->print(O);
8054 }
8055 #endif
8056 
8057 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
8058     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
8059 
  // We create new control-flow for the vectorized loop, so the original exit
  // condition will be dead after vectorization if it's only used by the
  // terminator.
8063   SmallVector<BasicBlock*> ExitingBlocks;
8064   OrigLoop->getExitingBlocks(ExitingBlocks);
8065   for (auto *BB : ExitingBlocks) {
8066     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
8067     if (!Cmp || !Cmp->hasOneUse())
8068       continue;
8069 
8070     // TODO: we should introduce a getUniqueExitingBlocks on Loop
8071     if (!DeadInstructions.insert(Cmp).second)
8072       continue;
8073 
    // An operand of the icmp is often a dead trunc, used by IndUpdate.
8075     // TODO: can recurse through operands in general
8076     for (Value *Op : Cmp->operands()) {
      if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
8079     }
8080   }
8081 
8082   // We create new "steps" for induction variable updates to which the original
8083   // induction variables map. An original update instruction will be dead if
8084   // all its users except the induction variable are dead.
8085   auto *Latch = OrigLoop->getLoopLatch();
8086   for (auto &Induction : Legal->getInductionVars()) {
8087     PHINode *Ind = Induction.first;
8088     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
8089 
    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
8092     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
8093       continue;
8094 
8095     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
8096           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
8097         }))
8098       DeadInstructions.insert(IndUpdate);
8099 
    // We also record as "Dead" the type-casting instructions we had identified
8101     // during induction analysis. We don't need any handling for them in the
8102     // vectorized loop because we have proven that, under a proper runtime
8103     // test guarding the vectorized loop, the value of the phi, and the casted
8104     // value of the phi, are the same. The last instruction in this casting chain
8105     // will get its scalar/vector/widened def from the scalar/vector/widened def
8106     // of the respective phi node. Any other casts in the induction def-use chain
8107     // have no other uses outside the phi update chain, and will be ignored.
8108     InductionDescriptor &IndDes = Induction.second;
8109     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
8110     DeadInstructions.insert(Casts.begin(), Casts.end());
8111   }
8112 }
8113 
8114 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
8115 
8116 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
8117 
8118 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
8119                                         Instruction::BinaryOps BinOp) {
8120   // When unrolling and the VF is 1, we only need to add a simple scalar.
8121   Type *Ty = Val->getType();
8122   assert(!Ty->isVectorTy() && "Val must be a scalar");
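  // E.g. for unroll part 2 (StartIdx == 2) with step S, the result is
  // Val plus 2 * S (using BinOp in the floating-point case below).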
8123 
8124   if (Ty->isFloatingPointTy()) {
8125     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
8126 
8127     // Floating-point operations inherit FMF via the builder's flags.
8128     Value *MulOp = Builder.CreateFMul(C, Step);
8129     return Builder.CreateBinOp(BinOp, Val, MulOp);
8130   }
8131   Constant *C = ConstantInt::get(Ty, StartIdx);
8132   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
8133 }
8134 
8135 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
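  // After this runs, the loop metadata roughly looks like:
  //   !llvm.loop !0
  //   !0 = distinct !{!0, ..., !1}
  //   !1 = !{!"llvm.loop.unroll.runtime.disable"}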
8136   SmallVector<Metadata *, 4> MDs;
8137   // Reserve first location for self reference to the LoopID metadata node.
8138   MDs.push_back(nullptr);
8139   bool IsUnrollMetadata = false;
8140   MDNode *LoopID = L->getLoopID();
8141   if (LoopID) {
8142     // First find existing loop unrolling disable metadata.
8143     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
8144       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
8145       if (MD) {
8146         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
8147         IsUnrollMetadata =
8148             S && S->getString().startswith("llvm.loop.unroll.disable");
8149       }
8150       MDs.push_back(LoopID->getOperand(i));
8151     }
8152   }
8153 
8154   if (!IsUnrollMetadata) {
8155     // Add runtime unroll disable metadata.
8156     LLVMContext &Context = L->getHeader()->getContext();
8157     SmallVector<Metadata *, 1> DisableOperands;
8158     DisableOperands.push_back(
8159         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
8160     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
8161     MDs.push_back(DisableNode);
8162     MDNode *NewLoopID = MDNode::get(Context, MDs);
8163     // Set operand 0 to refer to the loop id itself.
8164     NewLoopID->replaceOperandWith(0, NewLoopID);
8165     L->setLoopID(NewLoopID);
8166   }
8167 }
8168 
8169 //===--------------------------------------------------------------------===//
8170 // EpilogueVectorizerMainLoop
8171 //===--------------------------------------------------------------------===//
8172 
8173 /// This function is partially responsible for generating the control flow
8174 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8175 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
8176   MDNode *OrigLoopID = OrigLoop->getLoopID();
8177   Loop *Lp = createVectorLoopSkeleton("");
8178 
8179   // Generate the code to check the minimum iteration count of the vector
8180   // epilogue (see below).
8181   EPI.EpilogueIterationCountCheck =
8182       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
8183   EPI.EpilogueIterationCountCheck->setName("iter.check");
8184 
8185   // Generate the code to check any assumptions that we've made for SCEV
8186   // expressions.
8187   EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);
8188 
8189   // Generate the code that checks at runtime if arrays overlap. We put the
8190   // checks into a separate block to make the more common case of few elements
8191   // faster.
8192   EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
8193 
8194   // Generate the iteration count check for the main loop, *after* the check
8195   // for the epilogue loop, so that the path-length is shorter for the case
8196   // that goes directly through the vector epilogue. The longer-path length for
  // the main loop is compensated for by the gain from vectorizing the larger
8198   // trip count. Note: the branch will get updated later on when we vectorize
8199   // the epilogue.
8200   EPI.MainLoopIterationCountCheck =
8201       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
8202 
8203   // Generate the induction variable.
8204   OldInduction = Legal->getPrimaryInduction();
8205   Type *IdxTy = Legal->getWidestInductionType();
8206   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8207   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8208   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8209   EPI.VectorTripCount = CountRoundDown;
8210   Induction =
8211       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8212                               getDebugLocFromInstOrOperands(OldInduction));
8213 
  // Skip creating induction resume values here; they will be created in the
  // second pass. If we created them here, they wouldn't be used anyway,
  // because the VPlan in the second pass still contains the inductions from
  // the original loop.
8218 
8219   return completeLoopSkeleton(Lp, OrigLoopID);
8220 }
8221 
8222 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
8223   LLVM_DEBUG({
8224     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
8225            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
8226            << ", Main Loop UF:" << EPI.MainLoopUF
8227            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8228            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8229   });
8230 }
8231 
8232 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
8233   DEBUG_WITH_TYPE(VerboseDebug, {
8234     dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n";
8235   });
8236 }
8237 
8238 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
8239     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
8240   assert(L && "Expected valid Loop.");
8241   assert(Bypass && "Expected valid bypass basic block.");
8242   unsigned VFactor =
8243       ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue();
8244   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
8245   Value *Count = getOrCreateTripCount(L);
8246   // Reuse existing vector loop preheader for TC checks.
8247   // Note that new preheader block is generated for vector loop.
8248   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
8249   IRBuilder<> Builder(TCCheckBlock->getTerminator());
8250 
8251   // Generate code to check if the loop's trip count is less than VF * UF of the
8252   // main vector loop.
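  // When a scalar epilogue is required, the comparison is ULE rather than
  // ULT, so that entering the vector loop always leaves at least one
  // iteration for the scalar remainder.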
8253   auto P =
8254       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8255 
8256   Value *CheckMinIters = Builder.CreateICmp(
8257       P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor),
8258       "min.iters.check");
8259 
8260   if (!ForEpilogue)
8261     TCCheckBlock->setName("vector.main.loop.iter.check");
8262 
8263   // Create new preheader for vector loop.
8264   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
8265                                    DT, LI, nullptr, "vector.ph");
8266 
8267   if (ForEpilogue) {
8268     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
8269                                  DT->getNode(Bypass)->getIDom()) &&
8270            "TC check is expected to dominate Bypass");
8271 
8272     // Update dominator for Bypass & LoopExit.
8273     DT->changeImmediateDominator(Bypass, TCCheckBlock);
8274     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
8275 
8276     LoopBypassBlocks.push_back(TCCheckBlock);
8277 
8278     // Save the trip count so we don't have to regenerate it in the
8279     // vec.epilog.iter.check. This is safe to do because the trip count
8280     // generated here dominates the vector epilog iter check.
8281     EPI.TripCount = Count;
8282   }
8283 
8284   ReplaceInstWithInst(
8285       TCCheckBlock->getTerminator(),
8286       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8287 
8288   return TCCheckBlock;
8289 }
8290 
8291 //===--------------------------------------------------------------------===//
8292 // EpilogueVectorizerEpilogueLoop
8293 //===--------------------------------------------------------------------===//
8294 
8295 /// This function is partially responsible for generating the control flow
8296 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8297 BasicBlock *
8298 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
8299   MDNode *OrigLoopID = OrigLoop->getLoopID();
8300   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
8301 
  // Now, compare the remaining count; if there aren't enough iterations to
  // execute the vectorized epilogue, skip to the scalar part.
8304   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
8305   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
8306   LoopVectorPreHeader =
8307       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
8308                  LI, nullptr, "vec.epilog.ph");
8309   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
8310                                           VecEpilogueIterationCountCheck);
8311 
8312   // Adjust the control flow taking the state info from the main loop
8313   // vectorization into account.
8314   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8315          "expected this to be saved from the previous pass.");
8316   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8317       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8318 
8319   DT->changeImmediateDominator(LoopVectorPreHeader,
8320                                EPI.MainLoopIterationCountCheck);
8321 
8322   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8323       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8324 
8325   if (EPI.SCEVSafetyCheck)
8326     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8327         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8328   if (EPI.MemSafetyCheck)
8329     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8330         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8331 
8332   DT->changeImmediateDominator(
8333       VecEpilogueIterationCountCheck,
8334       VecEpilogueIterationCountCheck->getSinglePredecessor());
8335 
8336   DT->changeImmediateDominator(LoopScalarPreHeader,
8337                                EPI.EpilogueIterationCountCheck);
8338   DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck);
8339 
8340   // Keep track of bypass blocks, as they feed start values to the induction
8341   // phis in the scalar loop preheader.
8342   if (EPI.SCEVSafetyCheck)
8343     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8344   if (EPI.MemSafetyCheck)
8345     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8346   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8347 
  // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
8350   Type *IdxTy = Legal->getWidestInductionType();
8351   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8352                                          LoopVectorPreHeader->getFirstNonPHI());
8353   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8354   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8355                            EPI.MainLoopIterationCountCheck);
8356 
8357   // Generate the induction variable.
8358   OldInduction = Legal->getPrimaryInduction();
8359   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8360   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8361   Value *StartIdx = EPResumeVal;
8362   Induction =
8363       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8364                               getDebugLocFromInstOrOperands(OldInduction));
8365 
8366   // Generate induction resume values. These variables save the new starting
8367   // indexes for the scalar loop. They are used to test if there are any tail
8368   // iterations left once the vector loop has completed.
8369   // Note that when the vectorized epilogue is skipped due to iteration count
8370   // check, then the resume value for the induction variable comes from
8371   // the trip count of the main vector loop, hence passing the AdditionalBypass
8372   // argument.
8373   createInductionResumeValues(Lp, CountRoundDown,
8374                               {VecEpilogueIterationCountCheck,
8375                                EPI.VectorTripCount} /* AdditionalBypass */);
8376 
8377   AddRuntimeUnrollDisableMetaData(Lp);
8378   return completeLoopSkeleton(Lp, OrigLoopID);
8379 }
8380 
8381 BasicBlock *
8382 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8383     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8384 
  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
8387   assert(
8388       (!isa<Instruction>(EPI.TripCount) ||
8389        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8390       "saved trip count does not dominate insertion point.");
8391   Value *TC = EPI.TripCount;
8392   IRBuilder<> Builder(Insert->getTerminator());
8393   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8394 
8395   // Generate code to check if the loop's trip count is less than VF * UF of the
8396   // vector epilogue loop.
8397   auto P =
8398       Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8399 
8400   Value *CheckMinIters = Builder.CreateICmp(
8401       P, Count,
8402       ConstantInt::get(Count->getType(),
8403                        EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
8404       "min.epilog.iters.check");
8405 
8406   ReplaceInstWithInst(
8407       Insert->getTerminator(),
8408       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8409 
8410   LoopBypassBlocks.push_back(Insert);
8411   return Insert;
8412 }
8413 
8414 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8415   LLVM_DEBUG({
8416     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8417            << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
8418            << ", Main Loop UF:" << EPI.MainLoopUF
8419            << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8420            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8421   });
8422 }
8423 
8424 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8425   DEBUG_WITH_TYPE(VerboseDebug, {
8426     dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
8427   });
8428 }
8429 
8430 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8431     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8432   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8433   bool PredicateAtRangeStart = Predicate(Range.Start);
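
  // Example (illustrative): for Range = {4, 32} and a predicate that holds
  // for VF 4 and 8 but flips at VF 16, the loop below clamps Range.End to 16
  // and returns the predicate's value at Range.Start (true).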
8434 
8435   for (ElementCount TmpVF = Range.Start * 2;
8436        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8437     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8438       Range.End = TmpVF;
8439       break;
8440     }
8441 
8442   return PredicateAtRangeStart;
8443 }
8444 
8445 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8446 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8447 /// of VF's starting at a given VF and extending it as much as possible. Each
8448 /// vectorization decision can potentially shorten this sub-range during
8449 /// buildVPlan().
8450 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8451                                            ElementCount MaxVF) {
8452   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
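  // E.g. (illustrative): for MinVF = 1 and MaxVF = 8, if some decision flips
  // at VF = 4, the first iteration builds a VPlan for {1, 2} (clamping
  // SubRange.End to 4) and the second builds a plan for {4, 8}.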
8453   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8454     VFRange SubRange = {VF, MaxVFPlusOne};
8455     VPlans.push_back(buildVPlan(SubRange));
8456     VF = SubRange.End;
8457   }
8458 }
8459 
8460 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8461                                          VPlanPtr &Plan) {
8462   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8463 
8464   // Look for cached value.
8465   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8466   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8467   if (ECEntryIt != EdgeMaskCache.end())
8468     return ECEntryIt->second;
8469 
8470   VPValue *SrcMask = createBlockInMask(Src, Plan);
8471 
8472   // The terminator has to be a branch inst!
8473   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8474   assert(BI && "Unexpected terminator found");
8475 
8476   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8477     return EdgeMaskCache[Edge] = SrcMask;
8478 
8479   // If source is an exiting block, we know the exit edge is dynamically dead
8480   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8481   // adding uses of an otherwise potentially dead instruction.
8482   if (OrigLoop->isLoopExiting(Src))
8483     return EdgeMaskCache[Edge] = SrcMask;
8484 
8485   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8486   assert(EdgeMask && "No Edge Mask found for condition");
8487 
8488   if (BI->getSuccessor(0) != Dst)
8489     EdgeMask = Builder.createNot(EdgeMask);
8490 
8491   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8492     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8493     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8494     // The select version does not introduce new UB if SrcMask is false and
8495     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
8496     VPValue *False = Plan->getOrAddVPValue(
8497         ConstantInt::getFalse(BI->getCondition()->getType()));
8498     EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False);
8499   }
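
  // Illustrative widened IR for the masked edge (hypothetical names, VF = 4):
  //   %edge.mask = select <4 x i1> %src.mask, <4 x i1> %cond,
  //                       <4 x i1> zeroinitializer
  // Lanes where %src.mask is false yield false even if %cond is poison,
  // whereas an 'and' would let the poison through.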
8500 
8501   return EdgeMaskCache[Edge] = EdgeMask;
8502 }
8503 
8504 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8505   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8506 
8507   // Look for cached value.
8508   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8509   if (BCEntryIt != BlockMaskCache.end())
8510     return BCEntryIt->second;
8511 
8512   // All-one mask is modelled as no-mask following the convention for masked
8513   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8514   VPValue *BlockMask = nullptr;
8515 
8516   if (OrigLoop->getHeader() == BB) {
8517     if (!CM.blockNeedsPredication(BB))
8518       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8519 
8520     // Create the block in mask as the first non-phi instruction in the block.
8521     VPBuilder::InsertPointGuard Guard(Builder);
8522     auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
8523     Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);
8524 
8525     // Introduce the early-exit compare IV <= BTC to form header block mask.
8526     // This is used instead of IV < TC because TC may wrap, unlike BTC.
8527     // Start by constructing the desired canonical IV.
8528     VPValue *IV = nullptr;
8529     if (Legal->getPrimaryInduction())
8530       IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8531     else {
8532       auto IVRecipe = new VPWidenCanonicalIVRecipe();
8533       Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
8534       IV = IVRecipe->getVPSingleValue();
8535     }
8536     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8537     bool TailFolded = !CM.isScalarEpilogueAllowed();
8538 
8539     if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
      // While ActiveLaneMask is a binary op that consumes the loop trip count
      // as a second argument, we only pass the IV here and extract the
      // trip count from the transform state where codegen of the VP
      // instructions happens.
8544       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
8545     } else {
8546       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8547     }
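    // Illustrative header masks this may widen to (hypothetical names,
    // VF = 4):
    //   %mask = icmp ule <4 x i64> %vec.iv, %broadcast.btc
    // or, when the active-lane-mask intrinsic is preferred,
    //   %mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %iv,
    //                                                             i64 %tc)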
8548     return BlockMaskCache[BB] = BlockMask;
8549   }
8550 
8551   // This is the block mask. We OR all incoming edges.
8552   for (auto *Predecessor : predecessors(BB)) {
8553     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8554     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8555       return BlockMaskCache[BB] = EdgeMask;
8556 
8557     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8558       BlockMask = EdgeMask;
8559       continue;
8560     }
8561 
8562     BlockMask = Builder.createOr(BlockMask, EdgeMask);
8563   }
8564 
8565   return BlockMaskCache[BB] = BlockMask;
8566 }
8567 
8568 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8569                                                 ArrayRef<VPValue *> Operands,
8570                                                 VFRange &Range,
8571                                                 VPlanPtr &Plan) {
8572   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8573          "Must be called with either a load or store");
8574 
8575   auto willWiden = [&](ElementCount VF) -> bool {
8576     if (VF.isScalar())
8577       return false;
8578     LoopVectorizationCostModel::InstWidening Decision =
8579         CM.getWideningDecision(I, VF);
8580     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8581            "CM decision should be taken at this point.");
8582     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8583       return true;
8584     if (CM.isScalarAfterVectorization(I, VF) ||
8585         CM.isProfitableToScalarize(I, VF))
8586       return false;
8587     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8588   };
8589 
8590   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8591     return nullptr;
8592 
8593   VPValue *Mask = nullptr;
8594   if (Legal->isMaskRequired(I))
8595     Mask = createBlockInMask(I->getParent(), Plan);
8596 
8597   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8598     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask);
8599 
8600   StoreInst *Store = cast<StoreInst>(I);
8601   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8602                                             Mask);
8603 }
8604 
8605 VPWidenIntOrFpInductionRecipe *
8606 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi,
8607                                            ArrayRef<VPValue *> Operands) const {
8608   // Check if this is an integer or fp induction. If so, build the recipe that
8609   // produces its scalar and vector values.
8610   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8611   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
8612       II.getKind() == InductionDescriptor::IK_FpInduction) {
8613     assert(II.getStartValue() ==
8614            Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8615     const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts();
8616     return new VPWidenIntOrFpInductionRecipe(
8617         Phi, Operands[0], Casts.empty() ? nullptr : Casts.front());
8618   }
8619 
8620   return nullptr;
8621 }
8622 
8623 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8624     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8625     VPlan &Plan) const {
8626   // Optimize the special case where the source is a constant integer
8627   // induction variable. Notice that we can only optimize the 'trunc' case
8628   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8629   // (c) other casts depend on pointer size.
8630 
8631   // Determine whether \p K is a truncation based on an induction variable that
8632   // can be optimized.
8633   auto isOptimizableIVTruncate =
8634       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8635     return [=](ElementCount VF) -> bool {
8636       return CM.isOptimizableIVTruncate(K, VF);
8637     };
8638   };
8639 
8640   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8641           isOptimizableIVTruncate(I), Range)) {
8642 
8643     InductionDescriptor II =
8644         Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
8645     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8646     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
8647                                              Start, nullptr, I);
8648   }
8649   return nullptr;
8650 }
8651 
8652 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8653                                                 ArrayRef<VPValue *> Operands,
8654                                                 VPlanPtr &Plan) {
8655   // If all incoming values are equal, the incoming VPValue can be used directly
8656   // instead of creating a new VPBlendRecipe.
8657   VPValue *FirstIncoming = Operands[0];
8658   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8659         return FirstIncoming == Inc;
8660       })) {
8661     return Operands[0];
8662   }
8663 
8664   // We know that all PHIs in non-header blocks are converted into selects, so
8665   // we don't have to worry about the insertion order and we can just use the
8666   // builder. At this point we generate the predication tree. There may be
8667   // duplications since this is a simple recursive scan, but future
8668   // optimizations will clean it up.
8669   SmallVector<VPValue *, 2> OperandsWithMask;
8670   unsigned NumIncoming = Phi->getNumIncomingValues();
8671 
8672   for (unsigned In = 0; In < NumIncoming; In++) {
8673     VPValue *EdgeMask =
8674       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8675     assert((EdgeMask || NumIncoming == 1) &&
8676            "Multiple predecessors with one having a full mask");
8677     OperandsWithMask.push_back(Operands[In]);
8678     if (EdgeMask)
8679       OperandsWithMask.push_back(EdgeMask);
8680   }
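
  // E.g. (illustrative): a phi with incoming values (v0 from bb0, v1 from
  // bb1) becomes a blend with operands (v0, mask-of-edge-bb0, v1,
  // mask-of-edge-bb1), each required edge mask following its incoming value.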
8681   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8682 }
8683 
8684 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8685                                                    ArrayRef<VPValue *> Operands,
8686                                                    VFRange &Range) const {
8687 
8688   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8689       [this, CI](ElementCount VF) {
8690         return CM.isScalarWithPredication(CI, VF);
8691       },
8692       Range);
8693 
8694   if (IsPredicated)
8695     return nullptr;
8696 
8697   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8698   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8699              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8700              ID == Intrinsic::pseudoprobe ||
8701              ID == Intrinsic::experimental_noalias_scope_decl))
8702     return nullptr;
8703 
8704   auto willWiden = [&](ElementCount VF) -> bool {
8705     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The following case may be scalarized depending on the VF.
    // The flag indicates whether we use an intrinsic or a usual call for the
    // vectorized version of the instruction, i.e. whether it is beneficial
    // to use the intrinsic compared to a library call.
8710     bool NeedToScalarize = false;
8711     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8712     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8713     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8714     assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
8715            "Either the intrinsic cost or vector call cost must be valid");
8716     return UseVectorIntrinsic || !NeedToScalarize;
8717   };
8718 
8719   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8720     return nullptr;
8721 
8722   ArrayRef<VPValue *> Ops = Operands.take_front(CI->getNumArgOperands());
8723   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8724 }
8725 
8726 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8727   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8728          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8729   // Instruction should be widened, unless it is scalar after vectorization,
8730   // scalarization is profitable or it is predicated.
8731   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8732     return CM.isScalarAfterVectorization(I, VF) ||
8733            CM.isProfitableToScalarize(I, VF) ||
8734            CM.isScalarWithPredication(I, VF);
8735   };
8736   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8737                                                              Range);
8738 }
8739 
8740 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8741                                            ArrayRef<VPValue *> Operands) const {
8742   auto IsVectorizableOpcode = [](unsigned Opcode) {
8743     switch (Opcode) {
8744     case Instruction::Add:
8745     case Instruction::And:
8746     case Instruction::AShr:
8747     case Instruction::BitCast:
8748     case Instruction::FAdd:
8749     case Instruction::FCmp:
8750     case Instruction::FDiv:
8751     case Instruction::FMul:
8752     case Instruction::FNeg:
8753     case Instruction::FPExt:
8754     case Instruction::FPToSI:
8755     case Instruction::FPToUI:
8756     case Instruction::FPTrunc:
8757     case Instruction::FRem:
8758     case Instruction::FSub:
8759     case Instruction::ICmp:
8760     case Instruction::IntToPtr:
8761     case Instruction::LShr:
8762     case Instruction::Mul:
8763     case Instruction::Or:
8764     case Instruction::PtrToInt:
8765     case Instruction::SDiv:
8766     case Instruction::Select:
8767     case Instruction::SExt:
8768     case Instruction::Shl:
8769     case Instruction::SIToFP:
8770     case Instruction::SRem:
8771     case Instruction::Sub:
8772     case Instruction::Trunc:
8773     case Instruction::UDiv:
8774     case Instruction::UIToFP:
8775     case Instruction::URem:
8776     case Instruction::Xor:
8777     case Instruction::ZExt:
8778       return true;
8779     }
8780     return false;
8781   };
8782 
8783   if (!IsVectorizableOpcode(I->getOpcode()))
8784     return nullptr;
8785 
8786   // Success: widen this instruction.
8787   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8788 }
8789 
8790 void VPRecipeBuilder::fixHeaderPhis() {
8791   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8792   for (VPWidenPHIRecipe *R : PhisToFix) {
8793     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8794     VPRecipeBase *IncR =
8795         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8796     R->addOperand(IncR->getVPSingleValue());
8797   }
8798 }
8799 
8800 VPBasicBlock *VPRecipeBuilder::handleReplication(
8801     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8802     VPlanPtr &Plan) {
8803   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8804       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8805       Range);
8806 
8807   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8808       [&](ElementCount VF) { return CM.isPredicatedInst(I, VF); }, Range);
8809 
8810   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8811                                        IsUniform, IsPredicated);
8812   setRecipe(I, Recipe);
8813   Plan->addVPValue(I, Recipe);
8814 
8815   // Find if I uses a predicated instruction. If so, it will use its scalar
8816   // value. Avoid hoisting the insert-element which packs the scalar value into
8817   // a vector value, as that happens iff all users use the vector value.
8818   for (VPValue *Op : Recipe->operands()) {
8819     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8820     if (!PredR)
8821       continue;
8822     auto *RepR =
8823         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8824     assert(RepR->isPredicated() &&
8825            "expected Replicate recipe to be predicated");
8826     RepR->setAlsoPack(false);
8827   }
8828 
8829   // Finalize the recipe for Instr, first if it is not predicated.
8830   if (!IsPredicated) {
8831     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8832     VPBB->appendRecipe(Recipe);
8833     return VPBB;
8834   }
8835   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8836   assert(VPBB->getSuccessors().empty() &&
8837          "VPBB has successors when handling predicated replication.");
8838   // Record predicated instructions for above packing optimizations.
8839   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8840   VPBlockUtils::insertBlockAfter(Region, VPBB);
8841   auto *RegSucc = new VPBasicBlock();
8842   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8843   return RegSucc;
8844 }
8845 
8846 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8847                                                       VPRecipeBase *PredRecipe,
8848                                                       VPlanPtr &Plan) {
8849   // Instructions marked for predication are replicated and placed under an
8850   // if-then construct to prevent side-effects.
8851 
8852   // Generate recipes to compute the block mask for this region.
8853   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8854 
8855   // Build the triangular if-then region.
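  // The resulting region is a triangle (sketch; block names are derived from
  // the instruction's opcode):
  //
  //   pred.<opcode>.entry          (VPBranchOnMaskRecipe)
  //      |        \
  //      |    pred.<opcode>.if     (the replicated instruction)
  //      |        /
  //   pred.<opcode>.continue       (VPPredInstPHIRecipe, if Instr has a value)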
8856   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8857   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8858   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8859   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8860   auto *PHIRecipe = Instr->getType()->isVoidTy()
8861                         ? nullptr
8862                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8863   if (PHIRecipe) {
8864     Plan->removeVPValueFor(Instr);
8865     Plan->addVPValue(Instr, PHIRecipe);
8866   }
8867   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8868   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8869   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8870 
8871   // Note: first set Entry as region entry and then connect successors starting
8872   // from it in order, to propagate the "parent" of each VPBasicBlock.
8873   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8874   VPBlockUtils::connectBlocks(Pred, Exit);
8875 
8876   return Region;
8877 }
8878 
8879 VPRecipeOrVPValueTy
8880 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8881                                         ArrayRef<VPValue *> Operands,
8882                                         VFRange &Range, VPlanPtr &Plan) {
8883   // First, check for specific widening recipes that deal with calls, memory
8884   // operations, inductions and Phi nodes.
8885   if (auto *CI = dyn_cast<CallInst>(Instr))
8886     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8887 
8888   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8889     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8890 
8891   VPRecipeBase *Recipe;
8892   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8893     if (Phi->getParent() != OrigLoop->getHeader())
8894       return tryToBlend(Phi, Operands, Plan);
8895     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands)))
8896       return toVPRecipeResult(Recipe);
8897 
8898     if (Legal->isReductionVariable(Phi)) {
8899       RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8900       assert(RdxDesc.getRecurrenceStartValue() ==
8901              Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8902       VPValue *StartV = Operands[0];
8903 
8904       // Record the PHI and the incoming value from the backedge, so we can add
8905       // the incoming value from the backedge after all recipes have been
8906       // created.
8907       auto *PhiRecipe = new VPWidenPHIRecipe(Phi, RdxDesc, *StartV);
8908       PhisToFix.push_back(PhiRecipe);
8909       recordRecipeOf(cast<Instruction>(
8910           Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
8911       return toVPRecipeResult(PhiRecipe);
8912     }
8913 
8914     return toVPRecipeResult(new VPWidenPHIRecipe(Phi));
8915   }
8916 
8917   if (isa<TruncInst>(Instr) &&
8918       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8919                                                Range, *Plan)))
8920     return toVPRecipeResult(Recipe);
8921 
8922   if (!shouldWiden(Instr, Range))
8923     return nullptr;
8924 
8925   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8926     return toVPRecipeResult(new VPWidenGEPRecipe(
8927         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8928 
8929   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8930     bool InvariantCond =
8931         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8932     return toVPRecipeResult(new VPWidenSelectRecipe(
8933         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8934   }
8935 
8936   return toVPRecipeResult(tryToWiden(Instr, Operands));
8937 }
8938 
8939 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8940                                                         ElementCount MaxVF) {
8941   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8942 
8943   // Collect instructions from the original loop that will become trivially dead
8944   // in the vectorized loop. We don't need to vectorize these instructions. For
8945   // example, original induction update instructions can become dead because we
8946   // separately emit induction "steps" when generating code for the new loop.
8947   // Similarly, we create a new latch condition when setting up the structure
8948   // of the new loop, so the old one can become dead.
8949   SmallPtrSet<Instruction *, 4> DeadInstructions;
8950   collectTriviallyDeadInstructions(DeadInstructions);
8951 
  // Add assume instructions we need to drop to DeadInstructions, to prevent
  // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  // control flow is preserved, we should keep them.
8956   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8957   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8958 
8959   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8960   // Dead instructions do not need sinking. Remove them from SinkAfter.
8961   for (Instruction *I : DeadInstructions)
8962     SinkAfter.erase(I);
8963 
8964   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8965   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8966     VFRange SubRange = {VF, MaxVFPlusOne};
8967     VPlans.push_back(
8968         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8969     VF = SubRange.End;
8970   }
8971 }
8972 
8973 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8974     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8975     const DenseMap<Instruction *, Instruction *> &SinkAfter) {
8976 
8977   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8978 
8979   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8980 
8981   // ---------------------------------------------------------------------------
8982   // Pre-construction: record ingredients whose recipes we'll need to further
8983   // process after constructing the initial VPlan.
8984   // ---------------------------------------------------------------------------
8985 
8986   // Mark instructions we'll need to sink later and their targets as
8987   // ingredients whose recipe we'll need to record.
8988   for (auto &Entry : SinkAfter) {
8989     RecipeBuilder.recordRecipeOf(Entry.first);
8990     RecipeBuilder.recordRecipeOf(Entry.second);
8991   }
8992   for (auto &Reduction : CM.getInLoopReductionChains()) {
8993     PHINode *Phi = Reduction.first;
8994     RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
8995     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8996 
8997     RecipeBuilder.recordRecipeOf(Phi);
8998     for (auto &R : ReductionOperations) {
8999       RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
      // need to record the ICmp recipe, so it can be removed later.
9002       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
9003         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
9004     }
9005   }
9006 
9007   // For each interleave group which is relevant for this (possibly trimmed)
9008   // Range, add it to the set of groups to be later applied to the VPlan and add
9009   // placeholders for its members' Recipes which we'll be replacing with a
9010   // single VPInterleaveRecipe.
9011   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
9012     auto applyIG = [IG, this](ElementCount VF) -> bool {
9013       return (VF.isVector() && // Query is illegal for VF == 1
9014               CM.getWideningDecision(IG->getInsertPos(), VF) ==
9015                   LoopVectorizationCostModel::CM_Interleave);
9016     };
9017     if (!getDecisionAndClampRange(applyIG, Range))
9018       continue;
9019     InterleaveGroups.insert(IG);
9020     for (unsigned i = 0; i < IG->getFactor(); i++)
9021       if (Instruction *Member = IG->getMember(i))
9022         RecipeBuilder.recordRecipeOf(Member);
  }
9024 
9025   // ---------------------------------------------------------------------------
9026   // Build initial VPlan: Scan the body of the loop in a topological order to
9027   // visit each basic block after having visited its predecessor basic blocks.
9028   // ---------------------------------------------------------------------------
9029 
9030   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
9031   auto Plan = std::make_unique<VPlan>();
9032   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
9033   Plan->setEntry(VPBB);
9034 
9035   // Scan the body of the loop in a topological order to visit each basic block
9036   // after having visited its predecessor basic blocks.
9037   LoopBlocksDFS DFS(OrigLoop);
9038   DFS.perform(LI);
9039 
9040   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
9041     // Relevant instructions from basic block BB will be grouped into VPRecipe
9042     // ingredients and fill a new VPBasicBlock.
9043     unsigned VPBBsForBB = 0;
9044     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
9045     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
9046     VPBB = FirstVPBBForBB;
9047     Builder.setInsertPoint(VPBB);
9048 
9049     // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
9051     for (Instruction &I : BB->instructionsWithoutDebug()) {
9052       Instruction *Instr = &I;
9053 
9054       // First filter out irrelevant instructions, to ensure no recipes are
9055       // built for them.
9056       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
9057         continue;
9058 
9059       SmallVector<VPValue *, 4> Operands;
9060       auto *Phi = dyn_cast<PHINode>(Instr);
9061       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
9062         Operands.push_back(Plan->getOrAddVPValue(
9063             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
9064       } else {
9065         auto OpRange = Plan->mapToVPValues(Instr->operands());
9066         Operands = {OpRange.begin(), OpRange.end()};
9067       }
9068       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
9069               Instr, Operands, Range, Plan)) {
9070         // If Instr can be simplified to an existing VPValue, use it.
9071         if (RecipeOrValue.is<VPValue *>()) {
9072           Plan->addVPValue(Instr, RecipeOrValue.get<VPValue *>());
9073           continue;
9074         }
9075         // Otherwise, add the new recipe.
9076         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
9077         for (auto *Def : Recipe->definedValues()) {
9078           auto *UV = Def->getUnderlyingValue();
9079           Plan->addVPValue(UV, Def);
9080         }
9081 
9082         RecipeBuilder.setRecipe(Instr, Recipe);
9083         VPBB->appendRecipe(Recipe);
9084         continue;
9085       }
9086 
      // Otherwise, if all widening options failed, the instruction is to be
      // replicated. This may create a successor for VPBB.
9089       VPBasicBlock *NextVPBB =
9090           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
9091       if (NextVPBB != VPBB) {
9092         VPBB = NextVPBB;
9093         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
9094                                     : "");
9095       }
9096     }
9097   }
9098 
9099   RecipeBuilder.fixHeaderPhis();
9100 
  // Discard the empty dummy pre-entry VPBasicBlock. Note that other
  // VPBasicBlocks may also be empty, such as the last one (VPBB), reflecting
  // original basic-blocks with no recipes.
9104   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
9105   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
9106   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
9107   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
9108   delete PreEntry;
9109 
9110   // ---------------------------------------------------------------------------
9111   // Transform initial VPlan: Apply previously taken decisions, in order, to
9112   // bring the VPlan to its final state.
9113   // ---------------------------------------------------------------------------
9114 
9115   // Apply Sink-After legal constraints.
9116   for (auto &Entry : SinkAfter) {
9117     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
9118     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
9119 
9120     // If the target is in a replication region, make sure to move Sink to the
9121     // block after it, not into the replication region itself.
9122     if (auto *Region =
9123             dyn_cast_or_null<VPRegionBlock>(Target->getParent()->getParent())) {
9124       if (Region->isReplicator()) {
9125         assert(Region->getNumSuccessors() == 1 && "Expected SESE region!");
9126         VPBasicBlock *NextBlock =
9127             cast<VPBasicBlock>(Region->getSuccessors().front());
9128         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
9129         continue;
9130       }
9131     }
9132 
9133     auto *SinkRegion =
9134         dyn_cast_or_null<VPRegionBlock>(Sink->getParent()->getParent());
9135     // Unless the sink source is in a replicate region, sink the recipe
9136     // directly.
9137     if (!SinkRegion || !SinkRegion->isReplicator()) {
9138       Sink->moveAfter(Target);
9139       continue;
9140     }
9141 
9142     // If the sink source is in a replicate region, we need to move the whole
9143     // replicate region, which should only contain a single recipe in the main
9144     // block.
9145     assert(Sink->getParent()->size() == 1 &&
9146            "parent must be a replicator with a single recipe");
9147     auto *SplitBlock =
9148         Target->getParent()->splitAt(std::next(Target->getIterator()));
9149 
9150     auto *Pred = SinkRegion->getSinglePredecessor();
9151     auto *Succ = SinkRegion->getSingleSuccessor();
9152     VPBlockUtils::disconnectBlocks(Pred, SinkRegion);
9153     VPBlockUtils::disconnectBlocks(SinkRegion, Succ);
9154     VPBlockUtils::connectBlocks(Pred, Succ);
9155 
9156     auto *SplitPred = SplitBlock->getSinglePredecessor();
9157 
9158     VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
9159     VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
9160     VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
9161     if (VPBB == SplitPred)
9162       VPBB = SplitBlock;
9163   }
9164 
9165   // Interleave memory: for each Interleave Group we marked earlier as relevant
9166   // for this VPlan, replace the Recipes widening its memory instructions with a
9167   // single VPInterleaveRecipe at its insertion point.
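  // E.g. (illustrative): for a factor-2 group {store a[2*i], store a[2*i+1]},
  // both members' widened-store recipes are replaced by one VPInterleaveRecipe
  // carrying the shared address, both stored values, and the optional mask.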
9168   for (auto IG : InterleaveGroups) {
9169     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
9170         RecipeBuilder.getRecipe(IG->getInsertPos()));
9171     SmallVector<VPValue *, 4> StoredValues;
9172     for (unsigned i = 0; i < IG->getFactor(); ++i)
9173       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i)))
9174         StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0)));
9175 
9176     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
9177                                         Recipe->getMask());
9178     VPIG->insertBefore(Recipe);
9179     unsigned J = 0;
9180     for (unsigned i = 0; i < IG->getFactor(); ++i)
9181       if (Instruction *Member = IG->getMember(i)) {
9182         if (!Member->getType()->isVoidTy()) {
9183           VPValue *OriginalV = Plan->getVPValue(Member);
9184           Plan->removeVPValueFor(Member);
9185           Plan->addVPValue(Member, VPIG->getVPValue(J));
9186           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
9187           J++;
9188         }
9189         RecipeBuilder.getRecipe(Member)->eraseFromParent();
9190       }
9191   }
9192 
9193   // Adjust the recipes for any inloop reductions.
9194   if (Range.Start.isVector())
9195     adjustRecipesForInLoopReductions(Plan, RecipeBuilder);
9196 
9197   // Finally, if tail is folded by masking, introduce selects between the phi
9198   // and the live-out instruction of each reduction, at the end of the latch.
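  // E.g. (illustrative, in pseudo-IR) for a sum reduction with header mask
  // %mask:
  //   %sel = select <VF x i1> %mask, <VF x i32> %red.next, <VF x i32> %red.phi
  // so masked-off (tail) lanes keep the value of the reduction phi.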
9199   if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) {
9200     Builder.setInsertPoint(VPBB);
9201     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9202     for (auto &Reduction : Legal->getReductionVars()) {
9203       if (CM.isInLoopReduction(Reduction.first))
9204         continue;
9205       VPValue *Phi = Plan->getOrAddVPValue(Reduction.first);
9206       VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr());
9207       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
9208     }
9209   }
9210 
9211   std::string PlanName;
9212   raw_string_ostream RSO(PlanName);
9213   ElementCount VF = Range.Start;
9214   Plan->addVF(VF);
9215   RSO << "Initial VPlan for VF={" << VF;
9216   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9217     Plan->addVF(VF);
9218     RSO << "," << VF;
9219   }
9220   RSO << "},UF>=1";
9221   RSO.flush();
9222   Plan->setName(PlanName);
9223 
9224   return Plan;
9225 }
9226 
9227 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
9232   assert(!OrigLoop->isInnermost());
9233   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9234 
  // Create a new empty VPlan.
9236   auto Plan = std::make_unique<VPlan>();
9237 
  // Build the hierarchical CFG.
9239   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9240   HCFGBuilder.buildHierarchicalCFG();
9241 
9242   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9243        VF *= 2)
9244     Plan->addVF(VF);
9245 
9246   if (EnableVPlanPredication) {
9247     VPlanPredicator VPP(*Plan);
9248     VPP.predicate();
9249 
9250     // Avoid running transformation to recipes until masked code generation in
9251     // VPlan-native path is in place.
9252     return Plan;
9253   }
9254 
9255   SmallPtrSet<Instruction *, 1> DeadInstructions;
9256   VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan,
9257                                              Legal->getInductionVars(),
9258                                              DeadInstructions, *PSE.getSE());
9259   return Plan;
9260 }
9261 
// Adjust the recipes for any inloop reductions. The chain of instructions
// leading from the loop exit instr to the phi needs to be converted to
// reductions, with one operand being vector and the other being the scalar
// reduction chain.
9266 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
9267     VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
9268   for (auto &Reduction : CM.getInLoopReductionChains()) {
9269     PHINode *Phi = Reduction.first;
9270     RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
9271     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9272 
    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For minmax the chain will be the select instructions.
9277     Instruction *Chain = Phi;
9278     for (Instruction *R : ReductionOperations) {
9279       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9280       RecurKind Kind = RdxDesc.getRecurrenceKind();
9281 
9282       VPValue *ChainOp = Plan->getVPValue(Chain);
9283       unsigned FirstOpId;
9284       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9285         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9286                "Expected to replace a VPWidenSelectSC");
9287         FirstOpId = 1;
9288       } else {
9289         assert(isa<VPWidenRecipe>(WidenRecipe) &&
9290                "Expected to replace a VPWidenSC");
9291         FirstOpId = 0;
9292       }
9293       unsigned VecOpId =
9294           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9295       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9296 
9297       auto *CondOp = CM.foldTailByMasking()
9298                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9299                          : nullptr;
9300       VPReductionRecipe *RedRecipe = new VPReductionRecipe(
9301           &RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9302       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9303       Plan->removeVPValueFor(R);
9304       Plan->addVPValue(R, RedRecipe);
9305       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9307       WidenRecipe->eraseFromParent();
9308 
9309       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9310         VPRecipeBase *CompareRecipe =
9311             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9312         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9313                "Expected to replace a VPWidenSC");
9314         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9315                "Expected no remaining users");
9316         CompareRecipe->eraseFromParent();
9317       }
9318       Chain = R;
9319     }
9320   }
9321 }
9322 
9323 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9324 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9325                                VPSlotTracker &SlotTracker) const {
9326   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9327   IG->getInsertPos()->printAsOperand(O, false);
9328   O << ", ";
9329   getAddr()->printAsOperand(O, SlotTracker);
9330   VPValue *Mask = getMask();
9331   if (Mask) {
9332     O << ", ";
9333     Mask->printAsOperand(O, SlotTracker);
9334   }
9335   for (unsigned i = 0; i < IG->getFactor(); ++i)
9336     if (Instruction *I = IG->getMember(i))
9337       O << "\n" << Indent << "  " << VPlanIngredient(I) << " " << i;
9338 }
9339 #endif
9340 
9341 void VPWidenCallRecipe::execute(VPTransformState &State) {
9342   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9343                                   *this, State);
9344 }
9345 
9346 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9347   State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
9348                                     this, *this, InvariantCond, State);
9349 }
9350 
9351 void VPWidenRecipe::execute(VPTransformState &State) {
9352   State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
9353 }
9354 
9355 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9356   State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
9357                       *this, State.UF, State.VF, IsPtrLoopInvariant,
9358                       IsIndexLoopInvariant, State);
9359 }
9360 
9361 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9362   assert(!State.Instance && "Int or FP induction being replicated.");
9363   State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
9364                                    getTruncInst(), getVPValue(0),
9365                                    getCastValue(), State);
9366 }
9367 
9368 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9369   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), RdxDesc,
9370                                  this, State);
9371 }
9372 
9373 void VPBlendRecipe::execute(VPTransformState &State) {
9374   State.ILV->setDebugLocFromInst(State.Builder, Phi);
9375   // We know that all PHIs in non-header blocks are converted into
9376   // selects, so we don't have to worry about the insertion order and we
9377   // can just use the builder.
9378   // At this point we generate the predication tree. There may be
9379   // duplications since this is a simple recursive scan, but future
9380   // optimizations will clean it up.
9381 
9382   unsigned NumIncoming = getNumIncomingValues();
9383 
9384   // Generate a sequence of selects of the form:
9385   // SELECT(Mask3, In3,
9386   //        SELECT(Mask2, In2,
9387   //               SELECT(Mask1, In1,
9388   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // are essentially undef and are taken from In0.
9391   InnerLoopVectorizer::VectorParts Entry(State.UF);
9392   for (unsigned In = 0; In < NumIncoming; ++In) {
9393     for (unsigned Part = 0; Part < State.UF; ++Part) {
9394       // We might have single edge PHIs (blocks) - use an identity
9395       // 'select' for the first PHI operand.
9396       Value *In0 = State.get(getIncomingValue(In), Part);
9397       if (In == 0)
9398         Entry[Part] = In0; // Initialize with the first incoming value.
9399       else {
9400         // Select between the current value and the previous incoming edge
9401         // based on the incoming mask.
9402         Value *Cond = State.get(getMask(In), Part);
9403         Entry[Part] =
9404             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9405       }
9406     }
9407   }
9408   for (unsigned Part = 0; Part < State.UF; ++Part)
9409     State.set(this, Entry[Part], Part);
9410 }
9411 
9412 void VPInterleaveRecipe::execute(VPTransformState &State) {
9413   assert(!State.Instance && "Interleave group being replicated.");
9414   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9415                                       getStoredValues(), getMask());
9416 }
9417 
9418 void VPReductionRecipe::execute(VPTransformState &State) {
9419   assert(!State.Instance && "Reduction being replicated.");
9420   Value *PrevInChain = State.get(getChainOp(), 0);
9421   for (unsigned Part = 0; Part < State.UF; ++Part) {
9422     RecurKind Kind = RdxDesc->getRecurrenceKind();
9423     bool IsOrdered = useOrderedReductions(*RdxDesc);
9424     Value *NewVecOp = State.get(getVecOp(), Part);
9425     if (VPValue *Cond = getCondOp()) {
9426       Value *NewCond = State.get(Cond, Part);
9427       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9428       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
9429           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9430       Constant *IdenVec =
9431           ConstantVector::getSplat(VecTy->getElementCount(), Iden);
9432       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9433       NewVecOp = Select;
9434     }
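    // E.g. (illustrative): for a masked integer add reduction the select
    // built above is
    //   %sel = select <4 x i1> %cond, <4 x i32> %vec.op,
    //                 <4 x i32> zeroinitializer
    // since 0 is the identity for add, masked-off lanes do not change the
    // reduced value.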
9435     Value *NewRed;
9436     Value *NextInChain;
9437     if (IsOrdered) {
9438       NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9439                                       PrevInChain);
9440       PrevInChain = NewRed;
9441     } else {
9442       PrevInChain = State.get(getChainOp(), Part);
9443       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9444     }
9445     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9446       NextInChain =
9447           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9448                          NewRed, PrevInChain);
9449     } else if (IsOrdered)
9450       NextInChain = NewRed;
9451     else {
9452       NextInChain = State.Builder.CreateBinOp(
9453           (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed,
9454           PrevInChain);
9455     }
9456     State.set(this, NextInChain, Part);
9457   }
9458 }
9459 
9460 void VPReplicateRecipe::execute(VPTransformState &State) {
9461   if (State.Instance) { // Generate a single instance.
9462     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9463     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9464                                     *State.Instance, IsPredicated, State);
9465     // Insert scalar instance packing it into a vector.
9466     if (AlsoPack && State.VF.isVector()) {
9467       // If we're constructing lane 0, initialize to start from poison.
9468       if (State.Instance->Lane.isFirstLane()) {
9469         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9470         Value *Poison = PoisonValue::get(
9471             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9472         State.set(this, Poison, State.Instance->Part);
9473       }
9474       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9475     }
9476     return;
9477   }
9478 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
9482   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9483   assert((!State.VF.isScalable() || IsUniform) &&
9484          "Can't scalarize a scalable vector");
9485   for (unsigned Part = 0; Part < State.UF; ++Part)
9486     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9487       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
9488                                       VPIteration(Part, Lane), IsPredicated,
9489                                       State);
9490 }
9491 
9492 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9493   assert(State.Instance && "Branch on Mask works only on single instance.");
9494 
9495   unsigned Part = State.Instance->Part;
9496   unsigned Lane = State.Instance->Lane.getKnownLane();
9497 
9498   Value *ConditionBit = nullptr;
9499   VPValue *BlockInMask = getMask();
9500   if (BlockInMask) {
9501     ConditionBit = State.get(BlockInMask, Part);
9502     if (ConditionBit->getType()->isVectorTy())
9503       ConditionBit = State.Builder.CreateExtractElement(
9504           ConditionBit, State.Builder.getInt32(Lane));
9505   } else // Block in mask is all-one.
9506     ConditionBit = State.Builder.getTrue();
9507 
9508   // Replace the temporary unreachable terminator with a new conditional branch,
9509   // whose two destinations will be set later when they are created.
9510   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9511   assert(isa<UnreachableInst>(CurrentTerminator) &&
9512          "Expected to replace unreachable terminator with conditional branch.");
9513   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9514   CondBr->setSuccessor(0, nullptr);
9515   ReplaceInstWithInst(CurrentTerminator, CondBr);
9516 }
9517 
9518 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9519   assert(State.Instance && "Predicated instruction PHI works per instance.");
9520   Instruction *ScalarPredInst =
9521       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9522   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9523   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9524   assert(PredicatingBB && "Predicated block has no single predecessor.");
9525   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9526          "operand must be VPReplicateRecipe");
9527 
9528   // By current pack/unpack logic we need to generate only a single phi node: if
9529   // a vector value for the predicated instruction exists at this point it means
9530   // the instruction has vector users only, and a phi for the vector value is
9531   // needed. In this case the recipe of the predicated instruction is marked to
9532   // also do that packing, thereby "hoisting" the insert-element sequence.
9533   // Otherwise, a phi node for the scalar value is needed.
9534   unsigned Part = State.Instance->Part;
9535   if (State.hasVectorValue(getOperand(0), Part)) {
9536     Value *VectorValue = State.get(getOperand(0), Part);
9537     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9538     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9539     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9540     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9541     if (State.hasVectorValue(this, Part))
9542       State.reset(this, VPhi, Part);
9543     else
9544       State.set(this, VPhi, Part);
9545     // NOTE: Currently we need to update the value of the operand, so the next
9546     // predicated iteration inserts its generated value in the correct vector.
9547     State.reset(getOperand(0), VPhi, Part);
9548   } else {
9549     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9550     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9551     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9552                      PredicatingBB);
9553     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9554     if (State.hasScalarValue(this, *State.Instance))
9555       State.reset(this, Phi, *State.Instance);
9556     else
9557       State.set(this, Phi, *State.Instance);
9558     // NOTE: Currently we need to update the value of the operand, so the next
9559     // predicated iteration inserts its generated value in the correct vector.
9560     State.reset(getOperand(0), Phi, *State.Instance);
9561   }
9562 }
9563 
9564 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9565   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9566   State.ILV->vectorizeMemoryInstruction(
9567       &Ingredient, State, StoredValue ? nullptr : getVPSingleValue(), getAddr(),
9568       StoredValue, getMask());
9569 }
9570 
9571 // Determine how to lower the scalar epilogue, which depends on 1) optimising
9572 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
9573 // predication, and 4) a TTI hook that analyses whether the loop is suitable
9574 // for predication.
9575 static ScalarEpilogueLowering getScalarEpilogueLowering(
9576     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
9577     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
9578     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
9579     LoopVectorizationLegality &LVL) {
9580   // 1) OptSize takes precedence over all other options, i.e. if this is set,
9581   // don't look at hints or options, and don't request a scalar epilogue.
9582   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
9583   // LoopAccessInfo (due to code dependency and not being able to reliably get
9584   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
9585   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
9586   // versioning when the vectorization is forced, unlike hasOptSize. So revert
9587   // back to the old way and vectorize with versioning when forced. See D81345.)
9588   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9589                                                       PGSOQueryType::IRPass) &&
9590                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
9591     return CM_ScalarEpilogueNotAllowedOptSize;
9592 
9593   // 2) If set, obey the directives
9594   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9595     switch (PreferPredicateOverEpilogue) {
9596     case PreferPredicateTy::ScalarEpilogue:
9597       return CM_ScalarEpilogueAllowed;
9598     case PreferPredicateTy::PredicateElseScalarEpilogue:
9599       return CM_ScalarEpilogueNotNeededUsePredicate;
9600     case PreferPredicateTy::PredicateOrDontVectorize:
9601       return CM_ScalarEpilogueNotAllowedUsePredicate;
    }
9603   }
9604 
9605   // 3) If set, obey the hints
9606   switch (Hints.getPredicate()) {
9607   case LoopVectorizeHints::FK_Enabled:
9608     return CM_ScalarEpilogueNotNeededUsePredicate;
9609   case LoopVectorizeHints::FK_Disabled:
9610     return CM_ScalarEpilogueAllowed;
  }
9612 
9613   // 4) if the TTI hook indicates this is profitable, request predication.
9614   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
9615                                        LVL.getLAI()))
9616     return CM_ScalarEpilogueNotNeededUsePredicate;
9617 
9618   return CM_ScalarEpilogueAllowed;
9619 }
9620 
9621 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If values have been set for this Def, return the one relevant for \p Part.
9623   if (hasVectorValue(Def, Part))
9624     return Data.PerPartOutput[Def][Part];
9625 
9626   if (!hasScalarValue(Def, {Part, 0})) {
9627     Value *IRV = Def->getLiveInIRValue();
9628     Value *B = ILV->getBroadcastInstrs(IRV);
9629     set(Def, B, Part);
9630     return B;
9631   }
9632 
9633   Value *ScalarValue = get(Def, {Part, 0});
9634   // If we aren't vectorizing, we can just copy the scalar map values over
9635   // to the vector map.
9636   if (VF.isScalar()) {
9637     set(Def, ScalarValue, Part);
9638     return ScalarValue;
9639   }
9640 
9641   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
9642   bool IsUniform = RepR && RepR->isUniform();
9643 
9644   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
9645   // Check if there is a scalar value for the selected lane.
9646   if (!hasScalarValue(Def, {Part, LastLane})) {
9647     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
9648     assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
9649            "unexpected recipe found to be invariant");
9650     IsUniform = true;
9651     LastLane = 0;
9652   }
9653 
9654   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
9655 
9656   // Set the insert point after the last scalarized instruction. This
9657   // ensures the insertelement sequence will directly follow the scalar
9658   // definitions.
9659   auto OldIP = Builder.saveIP();
9660   auto NewIP = std::next(BasicBlock::iterator(LastInst));
9661   Builder.SetInsertPoint(&*NewIP);
9662 
  // Since we are vectorizing, we need to construct the vector values here.
  // If the value is known to be uniform after vectorization, we can simply
  // broadcast the scalar value corresponding to lane zero for each unroll
  // iteration. Otherwise, we construct the vector values using
  // insertelement instructions. Since the resulting vectors are stored in
  // State, we will only generate the insertelements once.
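  //
  // As a rough sketch (the lane values %s.N and the names are hypothetical),
  // for VF = 4 and an i32 scalar, the non-uniform path below packs lanes as:
  //
  //   %v0 = insertelement <4 x i32> poison, i32 %s.0, i32 0
  //   %v1 = insertelement <4 x i32> %v0, i32 %s.1, i32 1
  //   %v2 = insertelement <4 x i32> %v1, i32 %s.2, i32 2
  //   %v3 = insertelement <4 x i32> %v2, i32 %s.3, i32 3
  //
  // whereas the uniform path emits a single splat of the lane-zero value.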
9669   Value *VectorValue = nullptr;
9670   if (IsUniform) {
9671     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
9672     set(Def, VectorValue, Part);
9673   } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non-scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
9678     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
9679       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
9680     VectorValue = get(Def, Part);
9681   }
9682   Builder.restoreIP(OldIP);
9683   return VectorValue;
9684 }
9685 
// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning, without modifying
// the input LLVM IR.
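//
// For example (illustrative only), with -enable-vplan-native-path and an
// explicit vectorization hint on the outer loop, this path processes the
// outer loop of a nest such as:
//
//   #pragma clang loop vectorize(enable)
//   for (int i = 0; i < n; ++i)   // outer loop, handled here
//     for (int j = 0; j < m; ++j) // inner loop
//       a[i][j] += b[i][j];
//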
9690 static bool processLoopInVPlanNativePath(
9691     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
9692     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
9693     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
9694     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
9695     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
9696     LoopVectorizationRequirements &Requirements) {
9697 
9698   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
9699     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9700     return false;
9701   }
9702   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9703   Function *F = L->getHeader()->getParent();
9704   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9705 
9706   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9707       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
9708 
9709   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9710                                 &Hints, IAI);
9711   // Use the planner for outer loop vectorization.
9712   // TODO: CM is not used at this point inside the planner. Turn CM into an
9713   // optional argument if we don't need it in the future.
9714   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
9715                                Requirements, ORE);
9716 
9717   // Get user vectorization factor.
9718   ElementCount UserVF = Hints.getWidth();
9719 
9720   // Plan how to best vectorize, return the best VF and its cost.
9721   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9722 
9723   // If we are stress testing VPlan builds, do not attempt to generate vector
9724   // code. Masked vector code generation support will follow soon.
9725   // Also, do not attempt to vectorize if no vector code will be produced.
9726   if (VPlanBuildStressTest || EnableVPlanPredication ||
9727       VectorizationFactor::Disabled() == VF)
9728     return false;
9729 
9730   LVP.setBestPlan(VF.Width, 1);
9731 
9732   {
9733     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
9734                              F->getParent()->getDataLayout());
9735     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
9736                            &CM, BFI, PSI, Checks);
9737     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9738                       << L->getHeader()->getParent()->getName() << "\"\n");
9739     LVP.executePlan(LB, DT);
9740   }
9741 
9742   // Mark the loop as already vectorized to avoid vectorizing again.
9743   Hints.setAlreadyVectorized();
9744   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
9745   return true;
9746 }
9747 
// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated with mixed floating point
// precision, there will be a performance penalty from the conversion overhead
// and from the change in the effective vector width.
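//
// A minimal sketch of a loop that can trigger the remark (a hypothetical
// example; 'a' and 'b' are float arrays): the double literal forces an fpext
// of b[i] before the multiply and an fptrunc before the store:
//
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] * 2.5;
//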
9752 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
9753   SmallVector<Instruction *, 4> Worklist;
9754   for (BasicBlock *BB : L->getBlocks()) {
9755     for (Instruction &Inst : *BB) {
9756       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
9757         if (S->getValueOperand()->getType()->isFloatTy())
9758           Worklist.push_back(S);
9759       }
9760     }
9761   }
9762 
  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
9765   SmallPtrSet<const Instruction *, 4> Visited;
9766   SmallPtrSet<const Instruction *, 4> EmittedRemark;
9767   while (!Worklist.empty()) {
9768     auto *I = Worklist.pop_back_val();
9769     if (!L->contains(I))
9770       continue;
9771     if (!Visited.insert(I).second)
9772       continue;
9773 
9774     // Emit a remark if the floating point store required a floating
9775     // point conversion.
9776     // TODO: More work could be done to identify the root cause such as a
9777     // constant or a function return type and point the user to it.
9778     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
9779       ORE->emit([&]() {
9780         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
9781                                           I->getDebugLoc(), L->getHeader())
9782                << "floating point conversion changes vector width. "
9783                << "Mixed floating point precision requires an up/down "
9784                << "cast that will negatively impact performance.";
9785       });
9786 
9787     for (Use &Op : I->operands())
9788       if (auto *OpI = dyn_cast<Instruction>(Op))
9789         Worklist.push_back(OpI);
9790   }
9791 }
9792 
9793 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
9794     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
9795                                !EnableLoopInterleaving),
9796       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
9797                               !EnableLoopVectorization) {}
9798 
9799 bool LoopVectorizePass::processLoop(Loop *L) {
9800   assert((EnableVPlanNativePath || L->isInnermost()) &&
9801          "VPlan-native path is not enabled. Only process inner loops.");
9802 
9803 #ifndef NDEBUG
9804   const std::string DebugLocStr = getDebugLocString(L);
9805 #endif /* NDEBUG */
9806 
9807   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
9808                     << L->getHeader()->getParent()->getName() << "\" from "
9809                     << DebugLocStr << "\n");
9810 
9811   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
9812 
9813   LLVM_DEBUG(
9814       dbgs() << "LV: Loop hints:"
9815              << " force="
9816              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
9817                      ? "disabled"
9818                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
9819                             ? "enabled"
9820                             : "?"))
9821              << " width=" << Hints.getWidth()
9822              << " interleave=" << Hints.getInterleave() << "\n");
9823 
9824   // Function containing loop
9825   Function *F = L->getHeader()->getParent();
9826 
9827   // Looking at the diagnostic output is the only way to determine if a loop
9828   // was vectorized (other than looking at the IR or machine code), so it
9829   // is important to generate an optimization remark for each loop. Most of
9830   // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.
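  //
  // (Usage note, as a rough guide: these remarks are typically surfaced with
  // clang's -Rpass=loop-vectorize, -Rpass-missed=loop-vectorize and
  // -Rpass-analysis=loop-vectorize flags, or opt's -pass-remarks* options.)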
9834 
9835   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
9836     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
9837     return false;
9838   }
9839 
9840   PredicatedScalarEvolution PSE(*SE, *L);
9841 
9842   // Check if it is legal to vectorize the loop.
9843   LoopVectorizationRequirements Requirements;
9844   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
9845                                 &Requirements, &Hints, DB, AC, BFI, PSI);
9846   if (!LVL.canVectorize(EnableVPlanNativePath)) {
9847     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
9848     Hints.emitRemarkWithHints();
9849     return false;
9850   }
9851 
9852   // Check the function attributes and profiles to find out if this function
9853   // should be optimized for size.
9854   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9855       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
9856 
9857   // Entrance to the VPlan-native vectorization path. Outer loops are processed
9858   // here. They may require CFG and instruction level transformations before
9859   // even evaluating whether vectorization is profitable. Since we cannot modify
9860   // the incoming IR, we need to build VPlan upfront in the vectorization
9861   // pipeline.
9862   if (!L->isInnermost())
9863     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
9864                                         ORE, BFI, PSI, Hints, Requirements);
9865 
9866   assert(L->isInnermost() && "Inner loop expected.");
9867 
9868   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
9869   // count by optimizing for size, to minimize overheads.
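  // (For instance, a loop known to execute only a few iterations, e.g.
  // 'for (int i = 0; i < 3; ++i)', falls below the threshold and is only
  // vectorized while optimizing for size, unless vectorization is forced.)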
9870   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
9871   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
9872     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
9873                       << "This loop is worth vectorizing only if no scalar "
9874                       << "iteration overheads are incurred.");
9875     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
9876       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
9877     else {
9878       LLVM_DEBUG(dbgs() << "\n");
9879       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
9880     }
9881   }
9882 
9883   // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem like it can possibly be correct -- what if
  // the loop is an integer loop and the vector instructions selected are
  // purely integer vector instructions?
9887   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
9888     reportVectorizationFailure(
9889         "Can't vectorize when the NoImplicitFloat attribute is used",
9890         "loop not vectorized due to NoImplicitFloat attribute",
9891         "NoImplicitFloat", ORE, L);
9892     Hints.emitRemarkWithHints();
9893     return false;
9894   }
9895 
9896   // Check if the target supports potentially unsafe FP vectorization.
9897   // FIXME: Add a check for the type of safety issue (denormal, signaling)
9898   // for the target we're vectorizing for, to make sure none of the
9899   // additional fp-math flags can help.
9900   if (Hints.isPotentiallyUnsafe() &&
9901       TTI->isFPVectorizationPotentiallyUnsafe()) {
9902     reportVectorizationFailure(
9903         "Potentially unsafe FP op prevents vectorization",
9904         "loop not vectorized due to unsafe FP support.",
9905         "UnsafeFP", ORE, L);
9906     Hints.emitRemarkWithHints();
9907     return false;
9908   }
9909 
9910   if (!Requirements.canVectorizeFPMath(Hints)) {
9911     ORE->emit([&]() {
9912       auto *ExactFPMathInst = Requirements.getExactFPInst();
9913       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
9914                                                  ExactFPMathInst->getDebugLoc(),
9915                                                  ExactFPMathInst->getParent())
9916              << "loop not vectorized: cannot prove it is safe to reorder "
9917                 "floating-point operations";
9918     });
9919     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
9920                          "reorder floating-point operations\n");
9921     Hints.emitRemarkWithHints();
9922     return false;
9923   }
9924 
9925   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
9926   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
9927 
9928   // If an override option has been passed in for interleaved accesses, use it.
9929   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
9930     UseInterleaved = EnableInterleavedMemAccesses;
9931 
9932   // Analyze interleaved memory accesses.
9933   if (UseInterleaved) {
9934     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
9935   }
9936 
9937   // Use the cost model.
9938   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
9939                                 F, &Hints, IAI);
9940   CM.collectValuesToIgnore();
9941 
9942   // Use the planner for vectorization.
9943   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
9944                                Requirements, ORE);
9945 
9946   // Get user vectorization factor and interleave count.
9947   ElementCount UserVF = Hints.getWidth();
9948   unsigned UserIC = Hints.getInterleave();
9949 
9950   // Plan how to best vectorize, return the best VF and its cost.
9951   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
9952 
9953   VectorizationFactor VF = VectorizationFactor::Disabled();
9954   unsigned IC = 1;
9955 
9956   if (MaybeVF) {
9957     VF = *MaybeVF;
9958     // Select the interleave count.
9959     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
9960   }
9961 
9962   // Identify the diagnostic messages that should be produced.
9963   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
9964   bool VectorizeLoop = true, InterleaveLoop = true;
9965   if (VF.Width.isScalar()) {
9966     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
9967     VecDiagMsg = std::make_pair(
9968         "VectorizationNotBeneficial",
9969         "the cost-model indicates that vectorization is not beneficial");
9970     VectorizeLoop = false;
9971   }
9972 
9973   if (!MaybeVF && UserIC > 1) {
9974     // Tell the user interleaving was avoided up-front, despite being explicitly
9975     // requested.
9976     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
9977                          "interleaving should be avoided up front\n");
9978     IntDiagMsg = std::make_pair(
9979         "InterleavingAvoided",
9980         "Ignoring UserIC, because interleaving was avoided up front");
9981     InterleaveLoop = false;
9982   } else if (IC == 1 && UserIC <= 1) {
9983     // Tell the user interleaving is not beneficial.
9984     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
9985     IntDiagMsg = std::make_pair(
9986         "InterleavingNotBeneficial",
9987         "the cost-model indicates that interleaving is not beneficial");
9988     InterleaveLoop = false;
9989     if (UserIC == 1) {
9990       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
9991       IntDiagMsg.second +=
9992           " and is explicitly disabled or interleave count is set to 1";
9993     }
9994   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
9996     LLVM_DEBUG(
9997         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
9998     IntDiagMsg = std::make_pair(
9999         "InterleavingBeneficialButDisabled",
10000         "the cost-model indicates that interleaving is beneficial "
10001         "but is explicitly disabled or interleave count is set to 1");
10002     InterleaveLoop = false;
10003   }
10004 
10005   // Override IC if user provided an interleave count.
10006   IC = UserIC > 0 ? UserIC : IC;
10007 
10008   // Emit diagnostic messages, if any.
10009   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10010   if (!VectorizeLoop && !InterleaveLoop) {
    // Neither vectorize nor interleave the loop.
10012     ORE->emit([&]() {
10013       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10014                                       L->getStartLoc(), L->getHeader())
10015              << VecDiagMsg.second;
10016     });
10017     ORE->emit([&]() {
10018       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10019                                       L->getStartLoc(), L->getHeader())
10020              << IntDiagMsg.second;
10021     });
10022     return false;
10023   } else if (!VectorizeLoop && InterleaveLoop) {
10024     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10025     ORE->emit([&]() {
10026       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10027                                         L->getStartLoc(), L->getHeader())
10028              << VecDiagMsg.second;
10029     });
10030   } else if (VectorizeLoop && !InterleaveLoop) {
10031     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10032                       << ") in " << DebugLocStr << '\n');
10033     ORE->emit([&]() {
10034       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10035                                         L->getStartLoc(), L->getHeader())
10036              << IntDiagMsg.second;
10037     });
10038   } else if (VectorizeLoop && InterleaveLoop) {
10039     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10040                       << ") in " << DebugLocStr << '\n');
10041     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10042   }
10043 
10044   bool DisableRuntimeUnroll = false;
10045   MDNode *OrigLoopID = L->getLoopID();
10046   {
    // Optimistically generate runtime checks. Drop them if they turn out not
    // to be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
10050     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10051                              F->getParent()->getDataLayout());
10052     if (!VF.Width.isScalar() || IC > 1)
10053       Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
10054     LVP.setBestPlan(VF.Width, IC);
10055 
10056     using namespace ore;
10057     if (!VectorizeLoop) {
10058       assert(IC > 1 && "interleave count should not be 1 or 0");
10059       // If we decided that it is not legal to vectorize the loop, then
10060       // interleave it.
10061       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10062                                  &CM, BFI, PSI, Checks);
10063       LVP.executePlan(Unroller, DT);
10064 
10065       ORE->emit([&]() {
10066         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10067                                   L->getHeader())
10068                << "interleaved loop (interleaved count: "
10069                << NV("InterleaveCount", IC) << ")";
10070       });
10071     } else {
10072       // If we decided that it is *legal* to vectorize the loop, then do it.
10073 
10074       // Consider vectorizing the epilogue too if it's profitable.
10075       VectorizationFactor EpilogueVF =
10076           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10077       if (EpilogueVF.Width.isVector()) {
10078 
10079         // The first pass vectorizes the main loop and creates a scalar epilogue
10080         // to be vectorized by executing the plan (potentially with a different
10081         // factor) again shortly afterwards.
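        //
        // Illustrative shape (the factors here are hypothetical): with a
        // main-loop VF of 8 and an epilogue VF of 4, the result is roughly
        //
        //   main vector loop:     8 elements per iteration
        //   epilogue vector loop: 4 elements per iteration
        //   scalar remainder:     the final < 4 iterations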
10082         EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
10083                                           EpilogueVF.Width.getKnownMinValue(),
10084                                           1);
10085         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10086                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10087 
10088         LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
10089         LVP.executePlan(MainILV, DT);
10090         ++LoopsVectorized;
10091 
10092         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10093         formLCSSARecursively(*L, *DT, LI, SE);
10094 
10095         // Second pass vectorizes the epilogue and adjusts the control flow
10096         // edges from the first pass.
10097         LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
10098         EPI.MainLoopVF = EPI.EpilogueVF;
10099         EPI.MainLoopUF = EPI.EpilogueUF;
10100         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10101                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10102                                                  Checks);
10103         LVP.executePlan(EpilogILV, DT);
10104         ++LoopsEpilogueVectorized;
10105 
10106         if (!MainILV.areSafetyChecksAdded())
10107           DisableRuntimeUnroll = true;
10108       } else {
10109         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10110                                &LVL, &CM, BFI, PSI, Checks);
10111         LVP.executePlan(LB, DT);
10112         ++LoopsVectorized;
10113 
10114         // Add metadata to disable runtime unrolling a scalar loop when there
10115         // are no runtime checks about strides and memory. A scalar loop that is
10116         // rarely used is not worth unrolling.
10117         if (!LB.areSafetyChecksAdded())
10118           DisableRuntimeUnroll = true;
10119       }
10120       // Report the vectorization decision.
10121       ORE->emit([&]() {
10122         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10123                                   L->getHeader())
10124                << "vectorized loop (vectorization width: "
10125                << NV("VectorizationFactor", VF.Width)
10126                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10127       });
10128     }
10129 
10130     if (ORE->allowExtraAnalysis(LV_NAME))
10131       checkMixedPrecision(L, ORE);
10132   }
10133 
10134   Optional<MDNode *> RemainderLoopID =
10135       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10136                                       LLVMLoopVectorizeFollowupEpilogue});
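  // (For reference: LLVMLoopVectorizeFollowupAll/-Epilogue name the
  // "llvm.loop.vectorize.followup_all" and
  // "llvm.loop.vectorize.followup_epilogue" loop metadata used to transfer
  // user-specified followup attributes to the remainder loop.)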
10137   if (RemainderLoopID.hasValue()) {
10138     L->setLoopID(RemainderLoopID.getValue());
10139   } else {
10140     if (DisableRuntimeUnroll)
10141       AddRuntimeUnrollDisableMetaData(L);
10142 
10143     // Mark the loop as already vectorized to avoid vectorizing again.
10144     Hints.setAlreadyVectorized();
10145   }
10146 
10147   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10148   return true;
10149 }
10150 
10151 LoopVectorizeResult LoopVectorizePass::runImpl(
10152     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10153     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10154     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10155     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10156     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10157   SE = &SE_;
10158   LI = &LI_;
10159   TTI = &TTI_;
10160   DT = &DT_;
10161   BFI = &BFI_;
10162   TLI = TLI_;
10163   AA = &AA_;
10164   AC = &AC_;
10165   GetLAA = &GetLAA_;
10166   DB = &DB_;
10167   ORE = &ORE_;
10168   PSI = PSI_;
10169 
10170   // Don't attempt if
10171   // 1. the target claims to have no vector registers, and
10172   // 2. interleaving won't help ILP.
10173   //
10174   // The second condition is necessary because, even if the target has no
10175   // vector registers, loop vectorization may still enable scalar
10176   // interleaving.
10177   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10178       TTI->getMaxInterleaveFactor(1) < 2)
10179     return LoopVectorizeResult(false, false);
10180 
10181   bool Changed = false, CFGChanged = false;
10182 
10183   // The vectorizer requires loops to be in simplified form.
10184   // Since simplification may add new inner loops, it has to run before the
10185   // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
10187   // vectorized.
10188   for (auto &L : *LI)
10189     Changed |= CFGChanged |=
10190         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10191 
10192   // Build up a worklist of inner-loops to vectorize. This is necessary as
10193   // the act of vectorizing or partially unrolling a loop creates new loops
10194   // and can invalidate iterators across the loops.
10195   SmallVector<Loop *, 8> Worklist;
10196 
10197   for (Loop *L : *LI)
10198     collectSupportedLoops(*L, LI, ORE, Worklist);
10199 
10200   LoopsAnalyzed += Worklist.size();
10201 
10202   // Now walk the identified inner loops.
10203   while (!Worklist.empty()) {
10204     Loop *L = Worklist.pop_back_val();
10205 
10206     // For the inner loops we actually process, form LCSSA to simplify the
10207     // transform.
10208     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10209 
10210     Changed |= CFGChanged |= processLoop(L);
10211   }
10212 
10214   return LoopVectorizeResult(Changed, CFGChanged);
10215 }
10216 
10217 PreservedAnalyses LoopVectorizePass::run(Function &F,
10218                                          FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve the LoopInfo/DominatorTree analyses with
  // outer loop vectorization. Until this is addressed, mark these analyses
  // as preserved only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
10262 }
10263