//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
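//
// For example (illustrative), with a SIMD width of 4, a loop such as
//   for (i = 0; i < n; ++i) A[i] = B[i] + C[i];
// is transformed so that each wide iteration computes A[i..i+3] with vector
// instructions and i is incremented by 4.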
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to
// the VPlan infrastructure and to introduce outer loop vectorization support
// (see docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For
// this purpose, we temporarily introduced the VPlan-native vectorization
// path: an alternative vectorization path that is natively implemented on
// top of the VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
//  Data for SIMD.
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC - two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

/// @{
/// Metadata attribute names
static const char *const LLVMLoopVectorizeFollowupAll =
    "llvm.loop.vectorize.followup_all";
static const char *const LLVMLoopVectorizeFollowupVectorized =
    "llvm.loop.vectorize.followup_vectorized";
static const char *const LLVMLoopVectorizeFollowupEpilogue =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired and that predication is preferred; the enum below lists the
// possible choices. I.e., the vectorizer will try to fold the tail loop
// (epilogue) into the vector body and predicate the instructions
// accordingly. If tail-folding fails, there are different fallback
// strategies depending on these values:
namespace PreferPredicateTy {
  enum Option {
    ScalarEpilogue = 0,
    PredicateElseScalarEpilogue,
    PredicateOrDontVectorize
  };
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "Prefer tail-folding, create scalar epilogue if "
                          "tail-folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "Prefer tail-folding, don't attempt vectorization "
                          "if tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting the vectorization factor, "
             "which will be determined by the smallest type in the loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
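/// For example (illustrative), a group accessing A[3*i] and A[3*i+2] has a
/// gap at A[3*i+1]; masking keeps the wide load from touching that location.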
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in "
             "a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of block frequency analysis to access PGO "
             "heuristics, minimizing code growth in cold regions and being "
             "more aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after-loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the loop vectorization passes"));

/// A helper function that returns the type of a loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
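/// For example (illustrative), x86_fp80 has a 10-byte store size but a larger
/// padded allocation size, so a vector of x86_fp80 elements would be irregular.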
static bool hasIrregularType(Type *Ty, const DataLayout &DL, ElementCount VF) {
  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF.isVector()) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return TypeSize::get(VF.getKnownMinValue() *
                             DL.getTypeAllocSize(Ty).getFixedValue(),
                         VF.isScalable()) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
///       we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
  return V;
}

static Value *addFastMathFlag(Value *V, FastMathFlags FMF) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FMF);
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or into
/// multiple scalar copies when unrolling. This class also implements the
/// following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the
/// induction and reduction variables that were found for a given
/// vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM),
        BFI(BFI), PSI(PSI) {
    // Query this against the original loop and save it here because the profile
    // of the original loop header may change as the transformation happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header PHIs, live-outs, and more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPUser &Indices, unsigned UF,
                ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, ElementCount VF);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive. Uses the VPValue operands from \p Operands instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
  /// vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

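  /// Record \p Vector as the vectorized value of \p Scalar for unroll index
  /// \p Part.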
  void setVectorValue(Value *Scalar, unsigned Part, Value *Vector) {
    VectorLoopValueMap.setVectorValue(Scalar, Part, Vector);
  }

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                VPTransformState &State, VPValue *Addr,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs();

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);

  /// The loop exit block may have single-value PHI nodes with some
  /// incoming value. While vectorizing, we only handled real values
  /// that were defined inside the loop, and we should have one value for
  /// each predecessor of its parent basic block. See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop-invariant values and for the induction
  /// value. If this is the induction variable, then we extend it to N, N+1,
  /// ...; this is needed because each iteration in the loop corresponds to a
  /// SIMD element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
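  /// For example (illustrative), with StartIdx 0, Step 1, and VF 4, a splat
  /// Val of <V, V, V, V> becomes <V, V+1, V+2, V+3>.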
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                               Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Instruction *EntryVal);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In
  /// the latter case \p EntryVal is a TruncInst and we must not record anything
  /// for that IV, but it's error-prone to expect callers of this routine to
  /// care about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
                                             const Instruction *EntryVal,
                                             Value *VectorLoopValue,
                                             unsigned Part,
                                             unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
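  /// For example (illustrative), an integer induction with StartValue 5 and
  /// StepValue 3 maps Index 2 to 5 + 2 * 3 = 11.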
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration count
  /// in the scalar epilogue, from where the vectorized loop left off (given by
  /// \p VectorTripCount).
  void createInductionResumeValues(Loop *L, Value *VectorTripCount);

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and return
  /// the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks.  Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata).  Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning.  It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new induction variable, which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
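  /// For example (illustrative), a TripCount of 17 with VF 4 and UF 2 gives
  /// a VectorTripCount of 17 - 17 % 8 = 16.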
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile-guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile-guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                       Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Value *Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B,
                                              const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    }
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

/// Write a record \p DebugMsg about vectorization failure to the debug
/// output stream. If \p I is passed, it is an instruction that prevents
/// vectorization.
#ifndef NDEBUG
static void debugVectorizationFailure(const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: Not vectorizing: " << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
/// RemarkName is the identifier for the remark.  If \p I is passed it is an
/// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
/// the location of the remark.  \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

namespace llvm {

void reportVectorizationFailure(const StringRef DebugMsg,
    const StringRef OREMsg, const StringRef ORETag,
    OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) {
  LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
                ORETag, TheLoop, I) << OREMsg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return a string containing a file name and a line number for the given
/// loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints about how the scalar epilogue loop
// should be lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the expected
/// speedups/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization and interleaving should be avoided up front.
  Optional<unsigned> computeMaxVF(unsigned UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not zero,
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(unsigned MaxVF);

  /// Setup cost-based decisions for user vectorization factor.
  void selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Information about the register usage of the loop for the given
  /// vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
  void collectInLoopReductions();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
    return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for a memory instruction.
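  /// For example (illustrative): a load of A[i] with stride +1 maps to
  /// CM_Widen, a load of A[N-i] to CM_Widen_Reverse, members of a group like
  /// {A[2*i], A[2*i+1]} to CM_Interleave, and an access A[B[i]] to
  /// CM_GatherScatter.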
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
                           unsigned Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
                           ElementCount VF, InstWidening W, unsigned Cost) {
    assert(VF.isVector() && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group.
    // But the cost will be assigned to one instruction only.
1218     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1219       if (auto *I = Grp->getMember(i)) {
1220         if (Grp->getInsertPos() == I)
1221           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1222         else
1223           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1224       }
1225     }
1226   }
1227 
1228   /// Return the cost model decision for the given instruction \p I and vector
1229   /// width \p VF. Return CM_Unknown if this instruction did not pass
1230   /// through the cost modeling.
1231   InstWidening getWideningDecision(Instruction *I, ElementCount VF) {
1232     assert(!VF.isScalable() && "scalable vectors not yet supported.");
1233     assert(VF.isVector() && "Expected VF >=2");
1234 
1235     // Cost model is not run in the VPlan-native path - return conservative
1236     // result until this changes.
1237     if (EnableVPlanNativePath)
1238       return CM_GatherScatter;
1239 
1240     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1241     auto Itr = WideningDecisions.find(InstOnVF);
1242     if (Itr == WideningDecisions.end())
1243       return CM_Unknown;
1244     return Itr->second.first;
1245   }
1246 
1247   /// Return the vectorization cost for the given instruction \p I and vector
1248   /// width \p VF.
1249   unsigned getWideningCost(Instruction *I, ElementCount VF) {
1250     assert(VF.isVector() && "Expected VF >=2");
1251     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1252     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1253            "The cost is not calculated");
1254     return WideningDecisions[InstOnVF].second;
1255   }
1256 
1257   /// Return True if instruction \p I is an optimizable truncate whose operand
1258   /// is an induction variable. Such a truncate will be removed by adding a new
1259   /// induction variable with the destination type.
1260   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1261     // If the instruction is not a truncate, return false.
1262     auto *Trunc = dyn_cast<TruncInst>(I);
1263     if (!Trunc)
1264       return false;
1265 
1266     // Get the source and destination types of the truncate.
1267     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1268     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1269 
1270     // If the truncate is free for the given types, return false. Replacing a
1271     // free truncate with an induction variable would add an induction variable
1272     // update instruction to each iteration of the loop. We exclude from this
1273     // check the primary induction variable since it will need an update
1274     // instruction regardless.
1275     Value *Op = Trunc->getOperand(0);
1276     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1277       return false;
1278 
1279     // If the truncated value is not an induction variable, return false.
1280     return Legal->isInductionPhi(Op);
1281   }
1282 
1283   /// Collects the instructions to scalarize for each predicated instruction in
1284   /// the loop.
1285   void collectInstsToScalarize(ElementCount VF);
1286 
1287   /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on the CM decisions for Load/Store instructions
1289   /// that may be vectorized as interleave, gather-scatter or scalarized.
1290   void collectUniformsAndScalars(ElementCount VF) {
1291     // Do the analysis once.
1292     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1293       return;
1294     setCostBasedWideningDecision(VF);
1295     collectLoopUniforms(VF);
1296     collectLoopScalars(VF);
1297   }
1298 
1299   /// Returns true if the target machine supports masked store operation
1300   /// for the given \p DataType and kind of access to \p Ptr.
1301   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) {
1302     return Legal->isConsecutivePtr(Ptr) &&
1303            TTI.isLegalMaskedStore(DataType, Alignment);
1304   }
1305 
1306   /// Returns true if the target machine supports masked load operation
1307   /// for the given \p DataType and kind of access to \p Ptr.
1308   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) {
1309     return Legal->isConsecutivePtr(Ptr) &&
1310            TTI.isLegalMaskedLoad(DataType, Alignment);
1311   }
1312 
1313   /// Returns true if the target machine supports masked scatter operation
1314   /// for the given \p DataType.
1315   bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
1316     return TTI.isLegalMaskedScatter(DataType, Alignment);
1317   }
1318 
1319   /// Returns true if the target machine supports masked gather operation
1320   /// for the given \p DataType.
1321   bool isLegalMaskedGather(Type *DataType, Align Alignment) {
1322     return TTI.isLegalMaskedGather(DataType, Alignment);
1323   }
1324 
1325   /// Returns true if the target machine can represent \p V as a masked gather
1326   /// or scatter operation.
1327   bool isLegalGatherOrScatter(Value *V) {
1328     bool LI = isa<LoadInst>(V);
1329     bool SI = isa<StoreInst>(V);
1330     if (!LI && !SI)
1331       return false;
1332     auto *Ty = getMemInstValueType(V);
1333     Align Align = getLoadStoreAlignment(V);
1334     return (LI && isLegalMaskedGather(Ty, Align)) ||
1335            (SI && isLegalMaskedScatter(Ty, Align));
1336   }
1337 
1338   /// Returns true if \p I is an instruction that will be scalarized with
1339   /// predication. Such instructions include conditional stores and
1340   /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
1343   bool isScalarWithPredication(Instruction *I,
1344                                ElementCount VF = ElementCount::getFixed(1));
1345 
1346   // Returns true if \p I is an instruction that will be predicated either
1347   // through scalar predication or masked load/store or masked gather/scatter.
1348   // Superset of instructions that return true for isScalarWithPredication.
1349   bool isPredicatedInst(Instruction *I) {
1350     if (!blockNeedsPredication(I->getParent()))
1351       return false;
1352     // Loads and stores that need some form of masked operation are predicated
1353     // instructions.
1354     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1355       return Legal->isMaskRequired(I);
1356     return isScalarWithPredication(I);
1357   }
1358 
1359   /// Returns true if \p I is a memory instruction with consecutive memory
1360   /// access that can be widened.
1361   bool
1362   memoryInstructionCanBeWidened(Instruction *I,
1363                                 ElementCount VF = ElementCount::getFixed(1));
1364 
1365   /// Returns true if \p I is a memory instruction in an interleaved-group
1366   /// of memory accesses that can be vectorized with wide vector loads/stores
1367   /// and shuffles.
1368   bool
1369   interleavedAccessCanBeWidened(Instruction *I,
1370                                 ElementCount VF = ElementCount::getFixed(1));
1371 
1372   /// Check if \p Instr belongs to any interleaved access group.
1373   bool isAccessInterleaved(Instruction *Instr) {
1374     return InterleaveInfo.isInterleaved(Instr);
1375   }
1376 
1377   /// Get the interleaved access group that \p Instr belongs to.
1378   const InterleaveGroup<Instruction> *
1379   getInterleavedAccessGroup(Instruction *Instr) {
1380     return InterleaveInfo.getInterleaveGroup(Instr);
1381   }
1382 
1383   /// Returns true if an interleaved group requires a scalar iteration
1384   /// to handle accesses with gaps, and there is nothing preventing us from
1385   /// creating a scalar epilogue.
1386   bool requiresScalarEpilogue() const {
1387     return isScalarEpilogueAllowed() && InterleaveInfo.requiresScalarEpilogue();
1388   }
1389 
  /// Returns true if a scalar epilogue is allowed. One may be disallowed due
  /// to optsize or a loop hint annotation.
1392   bool isScalarEpilogueAllowed() const {
1393     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1394   }
1395 
  /// Returns true if all loop blocks should be masked in order to fold the
  /// tail of the loop.
1397   bool foldTailByMasking() const { return FoldTailByMasking; }
1398 
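  /// Returns true if the block \p BB needs to be predicated: either because
  /// it required predication in the original loop, or because all blocks are
  /// masked when folding the tail.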
1399   bool blockNeedsPredication(BasicBlock *BB) {
1400     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1401   }
1402 
1403   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1404   /// nodes to the chain of instructions representing the reductions. Uses a
1405   /// MapVector to ensure deterministic iteration order.
1406   using ReductionChainMap =
1407       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1408 
  /// Return the chains of instructions representing the inloop reductions.
1410   const ReductionChainMap &getInLoopReductionChains() const {
1411     return InLoopReductionChains;
1412   }
1413 
1414   /// Returns true if the Phi is part of an inloop reduction.
1415   bool isInLoopReduction(PHINode *Phi) const {
1416     return InLoopReductionChains.count(Phi);
1417   }
1418 
1419   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1420   /// with factor VF.  Return the cost of the instruction, including
1421   /// scalarization overhead if it's needed.
1422   unsigned getVectorIntrinsicCost(CallInst *CI, ElementCount VF);
1423 
1424   /// Estimate cost of a call instruction CI if it were vectorized with factor
1425   /// VF. Return the cost of the instruction, including scalarization overhead
1426   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
1429   unsigned getVectorCallCost(CallInst *CI, ElementCount VF,
1430                              bool &NeedToScalarize);
1431 
1432   /// Invalidates decisions already taken by the cost model.
1433   void invalidateCostModelingDecisions() {
1434     WideningDecisions.clear();
1435     Uniforms.clear();
1436     Scalars.clear();
1437   }
1438 
1439 private:
1440   unsigned NumPredStores = 0;
1441 
1442   /// \return An upper bound for the vectorization factor, a power-of-2 larger
1443   /// than zero. One is returned if vectorization should best be avoided due
1444   /// to cost.
1445   unsigned computeFeasibleMaxVF(unsigned ConstTripCount);
1446 
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
1454   using VectorizationCostTy = std::pair<unsigned, bool>;
1455 
1456   /// Returns the expected execution cost. The unit of the cost does
1457   /// not matter because we use the 'cost' units to compare different
1458   /// vector widths. The cost that is returned is *not* normalized by
1459   /// the factor width.
1460   VectorizationCostTy expectedCost(ElementCount VF);
1461 
1462   /// Returns the execution time cost of an instruction for a given vector
1463   /// width. Vector width of one means scalar.
1464   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1465 
1466   /// The cost-computation logic from getInstructionCost which provides
1467   /// the vector type as an output parameter.
1468   unsigned getInstructionCost(Instruction *I, ElementCount VF, Type *&VectorTy);
1469 
1470   /// Calculate vectorization cost of memory instruction \p I.
1471   unsigned getMemoryInstructionCost(Instruction *I, ElementCount VF);
1472 
1473   /// The cost computation for scalarized memory instruction.
1474   unsigned getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1475 
1476   /// The cost computation for interleaving group of memory instructions.
1477   unsigned getInterleaveGroupCost(Instruction *I, ElementCount VF);
1478 
1479   /// The cost computation for Gather/Scatter instruction.
1480   unsigned getGatherScatterCost(Instruction *I, ElementCount VF);
1481 
1482   /// The cost computation for widening instruction \p I with consecutive
1483   /// memory access.
1484   unsigned getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1485 
  /// The cost calculation for Load/Store instruction \p I with a uniform
  /// pointer - Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored ? 0 : extract of
  /// last element).
1490   unsigned getUniformMemOpCost(Instruction *I, ElementCount VF);
1491 
1492   /// Estimate the overhead of scalarizing an instruction. This is a
1493   /// convenience wrapper for the type-based getScalarizationOverhead API.
1494   unsigned getScalarizationOverhead(Instruction *I, ElementCount VF);
1495 
  /// Returns whether the instruction is a load or store and will be emitted
1497   /// as a vector operation.
1498   bool isConsecutiveLoadOrStore(Instruction *I);
1499 
1500   /// Returns true if an artificially high cost for emulated masked memrefs
1501   /// should be used.
1502   bool useEmulatedMaskMemRefHack(Instruction *I);
1503 
1504   /// Map of scalar integer values to the smallest bitwidth they can be legally
1505   /// represented as. The vector equivalents of these values should be truncated
1506   /// to this type.
1507   MapVector<Instruction *, uint64_t> MinBWs;
1508 
1509   /// A type representing the costs for instructions if they were to be
1510   /// scalarized rather than vectorized. The entries are Instruction-Cost
1511   /// pairs.
1512   using ScalarCostsTy = DenseMap<Instruction *, unsigned>;
1513 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1516   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1517 
1518   /// Records whether it is allowed to have the original scalar loop execute at
1519   /// least once. This may be needed as a fallback loop in case runtime
1520   /// aliasing/dependence checks fail, or to handle the tail/remainder
1521   /// iterations when the trip count is unknown or doesn't divide by the VF,
1522   /// or as a peel-loop to handle gaps in interleave-groups.
1523   /// Under optsize and when the trip count is very small we don't allow any
1524   /// iterations to execute in the scalar loop.
1525   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1526 
  /// All blocks of the loop are to be masked in order to fold the tail of
  /// scalar iterations.
1528   bool FoldTailByMasking = false;
1529 
1530   /// A map holding scalar costs for different vectorization factors. The
1531   /// presence of a cost for an instruction in the mapping indicates that the
1532   /// instruction will be scalarized when vectorizing with the associated
1533   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1534   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1535 
1536   /// Holds the instructions known to be uniform after vectorization.
1537   /// The data is collected per VF.
1538   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1539 
1540   /// Holds the instructions known to be scalar after vectorization.
1541   /// The data is collected per VF.
1542   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1543 
1544   /// Holds the instructions (address computations) that are forced to be
1545   /// scalarized.
1546   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1547 
  /// PHINodes of the reductions that should be expanded in-loop along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
1551   ReductionChainMap InLoopReductionChains;
1552 
1553   /// Returns the expected difference in cost from scalarizing the expression
1554   /// feeding a predicated instruction \p PredInst. The instructions to
1555   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1556   /// non-negative return value implies the expression will be scalarized.
1557   /// Currently, only single-use chains are considered for scalarization.
1558   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1559                               ElementCount VF);
1560 
1561   /// Collect the instructions that are uniform after vectorization. An
1562   /// instruction is uniform if we represent it with a single scalar value in
1563   /// the vectorized loop corresponding to each vector iteration. Examples of
1564   /// uniform instructions include pointer operands of consecutive or
1565   /// interleaved memory accesses. Note that although uniformity implies an
1566   /// instruction will be scalar, the reverse is not true. In general, a
1567   /// scalarized instruction will be represented by VF scalar values in the
1568   /// vectorized loop, each corresponding to an iteration of the original
1569   /// scalar loop.
1570   void collectLoopUniforms(ElementCount VF);
1571 
1572   /// Collect the instructions that are scalar after vectorization. An
1573   /// instruction is scalar if it is known to be uniform or will be scalarized
1574   /// during vectorization. Non-uniform scalarized instructions will be
1575   /// represented by VF values in the vectorized loop, each corresponding to an
1576   /// iteration of the original scalar loop.
1577   void collectLoopScalars(ElementCount VF);
1578 
1579   /// Keeps cost model vectorization decision and cost for instructions.
1580   /// Right now it is used for memory instructions only.
1581   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1582                                 std::pair<InstWidening, unsigned>>;
1583 
1584   DecisionList WideningDecisions;
1585 
1586   /// Returns true if \p V is expected to be vectorized and it needs to be
1587   /// extracted.
1588   bool needsExtract(Value *V, ElementCount VF) const {
1589     Instruction *I = dyn_cast<Instruction>(V);
1590     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1591         TheLoop->isLoopInvariant(I))
1592       return false;
1593 
1594     // Assume we can vectorize V (and hence we need extraction) if the
1595     // scalars are not computed yet. This can happen, because it is called
1596     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1597     // the scalars are collected. That should be a safe assumption in most
1598     // cases, because we check if the operands have vectorizable types
1599     // beforehand in LoopVectorizationLegality.
1600     return Scalars.find(VF) == Scalars.end() ||
1601            !isScalarAfterVectorization(I, VF);
1602   };
1603 
1604   /// Returns a range containing only operands needing to be extracted.
1605   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1606                                                    ElementCount VF) {
1607     return SmallVector<Value *, 4>(make_filter_range(
1608         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1609   }
1610 
1611 public:
1612   /// The loop that we evaluate.
1613   Loop *TheLoop;
1614 
1615   /// Predicated scalar evolution analysis.
1616   PredicatedScalarEvolution &PSE;
1617 
1618   /// Loop Info analysis.
1619   LoopInfo *LI;
1620 
1621   /// Vectorization legality.
1622   LoopVectorizationLegality *Legal;
1623 
1624   /// Vector target information.
1625   const TargetTransformInfo &TTI;
1626 
1627   /// Target Library Info.
1628   const TargetLibraryInfo *TLI;
1629 
1630   /// Demanded bits analysis.
1631   DemandedBits *DB;
1632 
1633   /// Assumption cache.
1634   AssumptionCache *AC;
1635 
1636   /// Interface to emit optimization remarks.
1637   OptimizationRemarkEmitter *ORE;
1638 
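  /// The function that contains the loop to vectorize.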
1639   const Function *TheFunction;
1640 
1641   /// Loop Vectorize Hint.
1642   const LoopVectorizeHints *Hints;
1643 
1644   /// The interleave access information contains groups of interleaved accesses
1645   /// with the same stride and close to each other.
1646   InterleavedAccessInfo &InterleaveInfo;
1647 
1648   /// Values to ignore in the cost model.
1649   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1650 
1651   /// Values to ignore in the cost model when VF > 1.
1652   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1653 };
1654 
1655 } // end namespace llvm
1656 
1657 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
1658 // vectorization. The loop needs to be annotated with #pragma omp simd
1659 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
1660 // vector length information is not provided, vectorization is not considered
1661 // explicit. Interleave hints are not allowed either. These limitations will be
1662 // relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
1664 // vectorize' semantics. This pragma provides *auto-vectorization hints*
1665 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1666 // provides *explicit vectorization hints* (LV can bypass legal checks and
1667 // assume that vectorization is legal). However, both hints are implemented
1668 // using the same metadata (llvm.loop.vectorize, processed by
1669 // LoopVectorizeHints). This will be fixed in the future when the native IR
1670 // representation for pragma 'omp simd' is introduced.
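//
// For example (illustrative), an outer loop annotated with
//   #pragma clang loop vectorize(enable) vectorize_width(4)
// carries the vector length and is treated as explicitly vectorized here,
// whereas a loop annotated only with
//   #pragma clang loop vectorize(enable)
// is not, because no vector length is provided.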
1671 static bool isExplicitVecOuterLoop(Loop *OuterLp,
1672                                    OptimizationRemarkEmitter *ORE) {
1673   assert(!OuterLp->isInnermost() && "This is not an outer loop");
1674   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1675 
1676   // Only outer loops with an explicit vectorization hint are supported.
1677   // Unannotated outer loops are ignored.
1678   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1679     return false;
1680 
1681   Function *Fn = OuterLp->getHeader()->getParent();
1682   if (!Hints.allowVectorization(Fn, OuterLp,
1683                                 true /*VectorizeOnlyWhenForced*/)) {
1684     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1685     return false;
1686   }
1687 
1688   if (Hints.getInterleave() > 1) {
1689     // TODO: Interleave support is future work.
1690     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1691                          "outer loops.\n");
1692     Hints.emitRemarkWithHints();
1693     return false;
1694   }
1695 
1696   return true;
1697 }
1698 
1699 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
1700                                   OptimizationRemarkEmitter *ORE,
1701                                   SmallVectorImpl<Loop *> &V) {
1702   // Collect inner loops and outer loops without irreducible control flow. For
1703   // now, only collect outer loops that have explicit vectorization hints. If we
1704   // are stress testing the VPlan H-CFG construction, we collect the outermost
1705   // loop of every loop nest.
1706   if (L.isInnermost() || VPlanBuildStressTest ||
1707       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
1708     LoopBlocksRPO RPOT(&L);
1709     RPOT.perform(LI);
1710     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
1711       V.push_back(&L);
1712       // TODO: Collect inner loops inside marked outer loops in case
1713       // vectorization fails for the outer loop. Do not invoke
1714       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1715       // already known to be reducible. We can use an inherited attribute for
1716       // that.
1717       return;
1718     }
1719   }
1720   for (Loop *InnerL : L)
1721     collectSupportedLoops(*InnerL, LI, ORE, V);
1722 }
1723 
1724 namespace {
1725 
1726 /// The LoopVectorize Pass.
1727 struct LoopVectorize : public FunctionPass {
1728   /// Pass identification, replacement for typeid
1729   static char ID;
1730 
1731   LoopVectorizePass Impl;
1732 
1733   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
1734                          bool VectorizeOnlyWhenForced = false)
1735       : FunctionPass(ID),
1736         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
1737     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
1738   }
1739 
1740   bool runOnFunction(Function &F) override {
1741     if (skipFunction(F))
1742       return false;
1743 
1744     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1745     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1746     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1747     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1748     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
1749     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
1750     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
1751     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1752     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1753     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
1754     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
1755     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
1756     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
1757 
1758     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
1759         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
1760 
1761     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
1762                         GetLAA, *ORE, PSI).MadeAnyChange;
1763   }
1764 
1765   void getAnalysisUsage(AnalysisUsage &AU) const override {
1766     AU.addRequired<AssumptionCacheTracker>();
1767     AU.addRequired<BlockFrequencyInfoWrapperPass>();
1768     AU.addRequired<DominatorTreeWrapperPass>();
1769     AU.addRequired<LoopInfoWrapperPass>();
1770     AU.addRequired<ScalarEvolutionWrapperPass>();
1771     AU.addRequired<TargetTransformInfoWrapperPass>();
1772     AU.addRequired<AAResultsWrapperPass>();
1773     AU.addRequired<LoopAccessLegacyAnalysis>();
1774     AU.addRequired<DemandedBitsWrapperPass>();
1775     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
1776     AU.addRequired<InjectTLIMappingsLegacy>();
1777 
1778     // We currently do not preserve loopinfo/dominator analyses with outer loop
1779     // vectorization. Until this is addressed, mark these analyses as preserved
    // only for the non-VPlan-native path.
1781     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
1782     if (!EnableVPlanNativePath) {
1783       AU.addPreserved<LoopInfoWrapperPass>();
1784       AU.addPreserved<DominatorTreeWrapperPass>();
1785     }
1786 
1787     AU.addPreserved<BasicAAWrapperPass>();
1788     AU.addPreserved<GlobalsAAWrapperPass>();
1789     AU.addRequired<ProfileSummaryInfoWrapperPass>();
1790   }
1791 };
1792 
1793 } // end anonymous namespace
1794 
1795 //===----------------------------------------------------------------------===//
1796 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
1797 // LoopVectorizationCostModel and LoopVectorizationPlanner.
1798 //===----------------------------------------------------------------------===//
1799 
1800 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
1804   Instruction *Instr = dyn_cast<Instruction>(V);
1805   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
1806                      (!Instr ||
1807                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
1808   // Place the code for broadcasting invariant variables in the new preheader.
1809   IRBuilder<>::InsertPointGuard Guard(Builder);
1810   if (SafeToHoist)
1811     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1812 
1813   // Broadcast the scalar into all locations in the vector.
1814   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
1815 
1816   return Shuf;
1817 }
1818 
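// For example (illustrative): for an integer IV with start 0, step 1,
// VF = 4 and UF = 2, the code below creates roughly
//   %vec.ind      = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %ph ],
//                                 [ %vec.ind.next, %latch ]
//   %step.add     = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
//   %vec.ind.next = add <4 x i32> %step.add, <i32 4, i32 4, i32 4, i32 4>
// where %vec.ind provides part 0 and %step.add provides part 1.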
1819 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
1820     const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
1821   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1822          "Expected either an induction phi-node or a truncate of it!");
1823   Value *Start = II.getStartValue();
1824 
  // Construct the initial value of the vector IV in the vector loop preheader.
1826   auto CurrIP = Builder.saveIP();
1827   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1828   if (isa<TruncInst>(EntryVal)) {
1829     assert(Start->getType()->isIntegerTy() &&
1830            "Truncation requires an integer type");
1831     auto *TruncType = cast<IntegerType>(EntryVal->getType());
1832     Step = Builder.CreateTrunc(Step, TruncType);
1833     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
1834   }
1835   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
1836   Value *SteppedStart =
1837       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
1838 
1839   // We create vector phi nodes for both integer and floating-point induction
1840   // variables. Here, we determine the kind of arithmetic we will perform.
1841   Instruction::BinaryOps AddOp;
1842   Instruction::BinaryOps MulOp;
1843   if (Step->getType()->isIntegerTy()) {
1844     AddOp = Instruction::Add;
1845     MulOp = Instruction::Mul;
1846   } else {
1847     AddOp = II.getInductionOpcode();
1848     MulOp = Instruction::FMul;
1849   }
1850 
1851   // Multiply the vectorization factor by the step using integer or
1852   // floating-point arithmetic as appropriate.
1853   Value *ConstVF =
1854       getSignedIntOrFpConstant(Step->getType(), VF.getKnownMinValue());
1855   Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
1856 
1857   // Create a vector splat to use in the induction update.
1858   //
1859   // FIXME: If the step is non-constant, we create the vector splat with
1860   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
1861   //        handle a constant vector splat.
1862   assert(!VF.isScalable() && "scalable vectors not yet supported.");
1863   Value *SplatVF = isa<Constant>(Mul)
1864                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
1865                        : Builder.CreateVectorSplat(VF, Mul);
1866   Builder.restoreIP(CurrIP);
1867 
1868   // We may need to add the step a number of times, depending on the unroll
1869   // factor. The last of those goes into the PHI.
1870   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
1871                                     &*LoopVectorBody->getFirstInsertionPt());
1872   VecInd->setDebugLoc(EntryVal->getDebugLoc());
1873   Instruction *LastInduction = VecInd;
1874   for (unsigned Part = 0; Part < UF; ++Part) {
1875     VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);
1876 
1877     if (isa<TruncInst>(EntryVal))
1878       addMetadata(LastInduction, EntryVal);
1879     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part);
1880 
1881     LastInduction = cast<Instruction>(addFastMathFlag(
1882         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
1883     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
1884   }
1885 
1886   // Move the last step to the end of the latch block. This ensures consistent
1887   // placement of all induction updates.
1888   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
1889   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
1890   auto *ICmp = cast<Instruction>(Br->getCondition());
1891   LastInduction->moveBefore(ICmp);
1892   LastInduction->setName("vec.ind.next");
1893 
1894   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
1895   VecInd->addIncoming(LastInduction, LoopVectorLatch);
1896 }
1897 
1898 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
1899   return Cost->isScalarAfterVectorization(I, VF) ||
1900          Cost->isProfitableToScalarize(I, VF);
1901 }
1902 
1903 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
1904   if (shouldScalarizeInstruction(IV))
1905     return true;
1906   auto isScalarInst = [&](User *U) -> bool {
1907     auto *I = cast<Instruction>(U);
1908     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
1909   };
1910   return llvm::any_of(IV->users(), isScalarInst);
1911 }
1912 
1913 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
1914     const InductionDescriptor &ID, const Instruction *EntryVal,
1915     Value *VectorLoopVal, unsigned Part, unsigned Lane) {
1916   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1917          "Expected either an induction phi-node or a truncate of it!");
1918 
  // This induction variable is not the phi from the original loop but the
  // newly-created IV, based on the proof that the cast Phi is equal to the
  // uncast Phi in the vectorized loop (possibly under a runtime guard). It
  // reuses the same InductionDescriptor that the original IV uses, but we
  // don't have to do any recording in this case - that is done when the
  // original IV is processed.
1925   if (isa<TruncInst>(EntryVal))
1926     return;
1927 
1928   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
1929   if (Casts.empty())
1930     return;
1931   // Only the first Cast instruction in the Casts vector is of interest.
  // The rest of the Casts (if any) have no uses outside the
1933   // induction update chain itself.
1934   Instruction *CastInst = *Casts.begin();
1935   if (Lane < UINT_MAX)
1936     VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal);
1937   else
1938     VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal);
1939 }
1940 
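// An illustrative summary of the decision tree below: with a scalar VF we
// only splat a scalar IV; if no user of the IV needs a scalar value we
// create just a vector IV; if some users are scalar but the IV itself is
// widened we create a vector IV plus scalar steps; and if every user is
// scalar we emit scalar steps only, adding a splat IV when tail-folding
// needs it to feed the masks of predicated loads/stores.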
1941 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
1942   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
1943          "Primary induction variable must have an integer type");
1944 
1945   auto II = Legal->getInductionVars().find(IV);
1946   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
1947 
1948   auto ID = II->second;
1949   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
1950 
1951   // The value from the original loop to which we are mapping the new induction
1952   // variable.
1953   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
1954 
1955   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
1956 
  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
1959   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
1960     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
1961            "Induction step should be loop invariant");
1962     if (PSE.getSE()->isSCEVable(IV->getType())) {
1963       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
1964       return Exp.expandCodeFor(Step, Step->getType(),
1965                                LoopVectorPreHeader->getTerminator());
1966     }
1967     return cast<SCEVUnknown>(Step)->getValue();
1968   };
1969 
1970   // The scalar value to broadcast. This is derived from the canonical
1971   // induction variable. If a truncation type is given, truncate the canonical
1972   // induction variable and step. Otherwise, derive these values from the
1973   // induction descriptor.
1974   auto CreateScalarIV = [&](Value *&Step) -> Value * {
1975     Value *ScalarIV = Induction;
1976     if (IV != OldInduction) {
1977       ScalarIV = IV->getType()->isIntegerTy()
1978                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
1979                      : Builder.CreateCast(Instruction::SIToFP, Induction,
1980                                           IV->getType());
1981       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
1982       ScalarIV->setName("offset.idx");
1983     }
1984     if (Trunc) {
1985       auto *TruncType = cast<IntegerType>(Trunc->getType());
1986       assert(Step->getType()->isIntegerTy() &&
1987              "Truncation requires an integer step");
1988       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
1989       Step = Builder.CreateTrunc(Step, TruncType);
1990     }
1991     return ScalarIV;
1992   };
1993 
  // Create the vector values from the scalar IV, for the case where we are
  // not creating a vector IV.
1996   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
1997     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
1998     for (unsigned Part = 0; Part < UF; ++Part) {
1999       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2000       Value *EntryPart =
2001           getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
2002                         ID.getInductionOpcode());
2003       VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart);
2004       if (Trunc)
2005         addMetadata(EntryPart, Trunc);
2006       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part);
2007     }
2008   };
2009 
2010   // Now do the actual transformations, and start with creating the step value.
2011   Value *Step = CreateStepValue(ID.getStep());
2012   if (VF.isZero() || VF.isScalar()) {
2013     Value *ScalarIV = CreateScalarIV(Step);
2014     CreateSplatIV(ScalarIV, Step);
2015     return;
2016   }
2017 
2018   // Determine if we want a scalar version of the induction variable. This is
2019   // true if the induction variable itself is not widened, or if it has at
2020   // least one user in the loop that is not widened.
2021   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2022   if (!NeedsScalarIV) {
2023     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
2024     return;
2025   }
2026 
2027   // Try to create a new independent vector induction variable. If we can't
2028   // create the phi node, we will splat the scalar induction variable in each
2029   // loop iteration.
2030   if (!shouldScalarizeInstruction(EntryVal)) {
2031     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
2032     Value *ScalarIV = CreateScalarIV(Step);
2033     // Create scalar steps that can be used by instructions we will later
2034     // scalarize. Note that the addition of the scalar steps will not increase
2035     // the number of instructions in the loop in the common case prior to
2036     // InstCombine. We will be trading one vector extract for each scalar step.
2037     buildScalarSteps(ScalarIV, Step, EntryVal, ID);
2038     return;
2039   }
2040 
  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorized IV. The exception is tail-folding, where the splat IV feeds
  // the predicate used by the masked loads/stores.
2044   Value *ScalarIV = CreateScalarIV(Step);
2045   if (!Cost->isScalarEpilogueAllowed())
2046     CreateSplatIV(ScalarIV, Step);
2047   buildScalarSteps(ScalarIV, Step, EntryVal, ID);
2048 }
2049 
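// For example (illustrative): for an integer induction with VLen = 4,
// StartIdx = 0 and step %s, the code below produces roughly
//   %step.splat = <4 x i32> splat of %s
//   %offsets    = mul <4 x i32> <i32 0, i32 1, i32 2, i32 3>, %step.splat
//   %induction  = add <4 x i32> %val, %offsets
// i.e. lane i of the result is Val[i] + (StartIdx + i) * Step.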
2050 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
2051                                           Instruction::BinaryOps BinOp) {
2052   // Create and check the types.
2053   auto *ValVTy = cast<FixedVectorType>(Val->getType());
2054   int VLen = ValVTy->getNumElements();
2055 
2056   Type *STy = Val->getType()->getScalarType();
2057   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2058          "Induction Step must be an integer or FP");
2059   assert(Step->getType() == STy && "Step has wrong type");
2060 
2061   SmallVector<Constant *, 8> Indices;
2062 
2063   if (STy->isIntegerTy()) {
2064     // Create a vector of consecutive numbers from zero to VF.
2065     for (int i = 0; i < VLen; ++i)
2066       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
2067 
2068     // Add the consecutive indices to the vector value.
2069     Constant *Cv = ConstantVector::get(Indices);
2070     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
2071     Step = Builder.CreateVectorSplat(VLen, Step);
2072     assert(Step->getType() == Val->getType() && "Invalid step vec");
2073     // FIXME: The newly created binary instructions should contain nsw/nuw flags,
2074     // which can be found from the original scalar operations.
2075     Step = Builder.CreateMul(Cv, Step);
2076     return Builder.CreateAdd(Val, Step, "induction");
2077   }
2078 
2079   // Floating point induction.
2080   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2081          "Binary Opcode should be specified for FP induction");
2082   // Create a vector of consecutive numbers from zero to VF.
2083   for (int i = 0; i < VLen; ++i)
2084     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
2085 
2086   // Add the consecutive indices to the vector value.
2087   Constant *Cv = ConstantVector::get(Indices);
2088 
2089   Step = Builder.CreateVectorSplat(VLen, Step);
2090 
2091   // Floating point operations had to be 'fast' to enable the induction.
2092   FastMathFlags Flags;
2093   Flags.setFast();
2094 
2095   Value *MulOp = Builder.CreateFMul(Cv, Step);
2096   if (isa<Instruction>(MulOp))
    // Have to check: MulOp may be a constant.
2098     cast<Instruction>(MulOp)->setFastMathFlags(Flags);
2099 
2100   Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2101   if (isa<Instruction>(BOp))
2102     cast<Instruction>(BOp)->setFastMathFlags(Flags);
2103   return BOp;
2104 }
2105 
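// For example (illustrative): with UF = 2, VF = 4 and a non-uniform
// EntryVal, the code below emits the eight scalar values
//   ScalarIV + (Part * 4 + Lane) * Step,  Part in [0, 2), Lane in [0, 4)
// one per original scalar iteration covered by the widened iterations; a
// uniform EntryVal only needs lane 0 of each part.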
2106 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2107                                            Instruction *EntryVal,
2108                                            const InductionDescriptor &ID) {
2109   // We shouldn't have to build scalar steps if we aren't vectorizing.
2110   assert(VF.isVector() && "VF should be greater than one");
2111   assert(!VF.isScalable() &&
2112          "the code below assumes a fixed number of elements at compile time");
2113   // Get the value type and ensure it and the step have the same integer type.
2114   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2115   assert(ScalarIVTy == Step->getType() &&
2116          "Val and Step should have the same type");
2117 
2118   // We build scalar steps for both integer and floating-point induction
2119   // variables. Here, we determine the kind of arithmetic we will perform.
2120   Instruction::BinaryOps AddOp;
2121   Instruction::BinaryOps MulOp;
2122   if (ScalarIVTy->isIntegerTy()) {
2123     AddOp = Instruction::Add;
2124     MulOp = Instruction::Mul;
2125   } else {
2126     AddOp = ID.getInductionOpcode();
2127     MulOp = Instruction::FMul;
2128   }
2129 
2130   // Determine the number of scalars we need to generate for each unroll
2131   // iteration. If EntryVal is uniform, we only need to generate the first
2132   // lane. Otherwise, we generate all VF values.
2133   unsigned Lanes =
2134       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF)
2135           ? 1
2136           : VF.getKnownMinValue();
2137   // Compute the scalar steps and save the results in VectorLoopValueMap.
2138   for (unsigned Part = 0; Part < UF; ++Part) {
2139     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2140       auto *StartIdx = getSignedIntOrFpConstant(
2141           ScalarIVTy, VF.getKnownMinValue() * Part + Lane);
2142       auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
2143       auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
2144       VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add);
2145       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane);
2146     }
2147   }
2148 }
2149 
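// For example (illustrative): if %v was scalarized with VF = 4 and is later
// needed in vector form, the code below rebuilds it lane by lane, roughly
//   %p0 = insertelement <4 x i32> undef, i32 %v.lane0, i32 0
//   ...
//   %p3 = insertelement <4 x i32> %p2, i32 %v.lane3, i32 3
// unless %v is uniform after vectorization, in which case lane 0 is simply
// broadcast.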
2150 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
2151   assert(V != Induction && "The new induction variable should not be used.");
2152   assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2153   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2154 
2155   // If we have a stride that is replaced by one, do it here. Defer this for
2156   // the VPlan-native path until we start running Legal checks in that path.
2157   if (!EnableVPlanNativePath && Legal->hasStride(V))
2158     V = ConstantInt::get(V->getType(), 1);
2159 
2160   // If we have a vector mapped to this value, return it.
2161   if (VectorLoopValueMap.hasVectorValue(V, Part))
2162     return VectorLoopValueMap.getVectorValue(V, Part);
2163 
2164   // If the value has not been vectorized, check if it has been scalarized
2165   // instead. If it has been scalarized, and we actually need the value in
2166   // vector form, we will construct the vector values on demand.
2167   if (VectorLoopValueMap.hasAnyScalarValue(V)) {
2168     Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0});
2169 
2170     // If we've scalarized a value, that value should be an instruction.
2171     auto *I = cast<Instruction>(V);
2172 
2173     // If we aren't vectorizing, we can just copy the scalar map values over to
2174     // the vector map.
2175     if (VF.isScalar()) {
2176       VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
2177       return ScalarValue;
2178     }
2179 
2180     // Get the last scalar instruction we generated for V and Part. If the value
2181     // is known to be uniform after vectorization, this corresponds to lane zero
2182     // of the Part unroll iteration. Otherwise, the last instruction is the one
2183     // we created for the last vector lane of the Part unroll iteration.
2184     assert(!VF.isScalable() && "scalable vectors not yet supported.");
2185     unsigned LastLane = Cost->isUniformAfterVectorization(I, VF)
2186                             ? 0
2187                             : VF.getKnownMinValue() - 1;
2188     auto *LastInst = cast<Instruction>(
2189         VectorLoopValueMap.getScalarValue(V, {Part, LastLane}));
2190 
2191     // Set the insert point after the last scalarized instruction. This ensures
2192     // the insertelement sequence will directly follow the scalar definitions.
2193     auto OldIP = Builder.saveIP();
2194     auto NewIP = std::next(BasicBlock::iterator(LastInst));
2195     Builder.SetInsertPoint(&*NewIP);
2196 
2197     // However, if we are vectorizing, we need to construct the vector values.
2198     // If the value is known to be uniform after vectorization, we can just
2199     // broadcast the scalar value corresponding to lane zero for each unroll
2200     // iteration. Otherwise, we construct the vector values using insertelement
2201     // instructions. Since the resulting vectors are stored in
2202     // VectorLoopValueMap, we will only generate the insertelements once.
2203     Value *VectorValue = nullptr;
2204     if (Cost->isUniformAfterVectorization(I, VF)) {
2205       VectorValue = getBroadcastInstrs(ScalarValue);
2206       VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
2207     } else {
2208       // Initialize packing with insertelements to start from undef.
2209       assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2210       Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF));
2211       VectorLoopValueMap.setVectorValue(V, Part, Undef);
2212       for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
2213         packScalarIntoVectorValue(V, {Part, Lane});
2214       VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
2215     }
2216     Builder.restoreIP(OldIP);
2217     return VectorValue;
2218   }
2219 
2220   // If this scalar is unknown, assume that it is a constant or that it is
2221   // loop invariant. Broadcast V and save the value for future uses.
2222   Value *B = getBroadcastInstrs(V);
2223   VectorLoopValueMap.setVectorValue(V, Part, B);
2224   return B;
2225 }
2226 
2227 Value *
2228 InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
2229                                             const VPIteration &Instance) {
2230   // If the value is not an instruction contained in the loop, it should
2231   // already be scalar.
2232   if (OrigLoop->isLoopInvariant(V))
2233     return V;
2234 
  assert((Instance.Lane == 0 ||
          !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)) &&
         "Uniform values only have lane zero");
2238 
2239   // If the value from the original loop has not been vectorized, it is
2240   // represented by UF x VF scalar values in the new loop. Return the requested
2241   // scalar value.
2242   if (VectorLoopValueMap.hasScalarValue(V, Instance))
2243     return VectorLoopValueMap.getScalarValue(V, Instance);
2244 
2245   // If the value has not been scalarized, get its entry in VectorLoopValueMap
2246   // for the given unroll part. If this entry is not a vector type (i.e., the
2247   // vectorization factor is one), there is no need to generate an
2248   // extractelement instruction.
2249   auto *U = getOrCreateVectorValue(V, Instance.Part);
2250   if (!U->getType()->isVectorTy()) {
2251     assert(VF.isScalar() && "Value not scalarized has non-vector type");
2252     return U;
2253   }
2254 
2255   // Otherwise, the value from the original loop has been vectorized and is
2256   // represented by UF vector values. Extract and return the requested scalar
2257   // value from the appropriate vector lane.
2258   return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
2259 }
2260 
2261 void InnerLoopVectorizer::packScalarIntoVectorValue(
2262     Value *V, const VPIteration &Instance) {
2263   assert(V != Induction && "The new induction variable should not be used.");
2264   assert(!V->getType()->isVectorTy() && "Can't pack a vector");
2265   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2266 
2267   Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
2268   Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
2269   VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
2270                                             Builder.getInt32(Instance.Lane));
2271   VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
2272 }
2273 
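// For example (illustrative): for VF = 4 the code below emits
//   %reverse = shufflevector <4 x i32> %vec, <4 x i32> undef,
//                            <4 x i32> <i32 3, i32 2, i32 1, i32 0>
// which reverses the order of the vector lanes.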
2274 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2275   assert(Vec->getType()->isVectorTy() && "Invalid type");
2276   assert(!VF.isScalable() && "Cannot reverse scalable vectors");
2277   SmallVector<int, 8> ShuffleMask;
2278   for (unsigned i = 0; i < VF.getKnownMinValue(); ++i)
2279     ShuffleMask.push_back(VF.getKnownMinValue() - i - 1);
2280 
2281   return Builder.CreateShuffleVector(Vec, ShuffleMask, "reverse");
2282 }
2283 
2284 // Return whether we allow using masked interleave-groups (for dealing with
2285 // strided loads/stores that reside in predicated blocks, or for dealing
2286 // with gaps).
2287 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2288   // If an override option has been passed in for interleaved accesses, use it.
2289   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2290     return EnableMaskedInterleavedMemAccesses;
2291 
2292   return TTI.enableMaskedInterleavedAccessVectorization();
2293 }
2294 
2295 // Try to vectorize the interleave group that \p Instr belongs to.
2296 //
2297 // E.g. Translate following interleaved load group (factor = 3):
2298 //   for (i = 0; i < N; i+=3) {
2299 //     R = Pic[i];             // Member of index 0
2300 //     G = Pic[i+1];           // Member of index 1
2301 //     B = Pic[i+2];           // Member of index 2
2302 //     ... // do something to R, G, B
2303 //   }
2304 // To:
2305 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2306 //   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
2307 //   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
2308 //   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
2309 //
2310 // Or translate following interleaved store group (factor = 3):
2311 //   for (i = 0; i < N; i+=3) {
2312 //     ... do something to R, G, B
2313 //     Pic[i]   = R;           // Member of index 0
2314 //     Pic[i+1] = G;           // Member of index 1
2315 //     Pic[i+2] = B;           // Member of index 2
2316 //   }
2317 // To:
2318 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2319 //   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2320 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2321 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2322 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2323 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2324     const InterleaveGroup<Instruction> *Group, VPTransformState &State,
2325     VPValue *Addr, VPValue *BlockInMask) {
2326   Instruction *Instr = Group->getInsertPos();
2327   const DataLayout &DL = Instr->getModule()->getDataLayout();
2328 
2329   // Prepare for the vector type of the interleaved load/store.
2330   Type *ScalarTy = getMemInstValueType(Instr);
2331   unsigned InterleaveFactor = Group->getFactor();
2332   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2333   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2334 
2335   // Prepare for the new pointers.
2336   SmallVector<Value *, 2> AddrParts;
2337   unsigned Index = Group->getIndex(Instr);
2338 
2339   // TODO: extend the masked interleaved-group support to reversed access.
2340   assert((!BlockInMask || !Group->isReverse()) &&
2341          "Reversed masked interleave-group not supported.");
2342 
2343   // If the group is reverse, adjust the index to refer to the last vector lane
2344   // instead of the first. We adjust the index from the first vector lane,
2345   // rather than directly getting the pointer for lane VF - 1, because the
2346   // pointer operand of the interleaved access is supposed to be uniform. For
2347   // uniform instructions, we're only required to generate a value for the
2348   // first vector lane in each unroll iteration.
2349   assert(!VF.isScalable() &&
2350          "scalable vector reverse operation is not implemented");
2351   if (Group->isReverse())
2352     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2353 
2354   for (unsigned Part = 0; Part < UF; Part++) {
2355     Value *AddrPart = State.get(Addr, {Part, 0});
2356     setDebugLocFromInst(Builder, AddrPart);
2357 
    // Note that the current instruction could be at any index; we need to
    // adjust the address to that of the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2369 
2370     bool InBounds = false;
2371     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2372       InBounds = gep->isInBounds();
2373     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2374     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2375 
2376     // Cast to the vector pointer type.
2377     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2378     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2379     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2380   }
2381 
2382   setDebugLocFromInst(Builder, Instr);
2383   Value *UndefVec = UndefValue::get(VecTy);
2384 
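  // An interleave group with gaps may speculatively access memory past its
  // last member; when no scalar epilogue can absorb that, the lanes of the
  // missing members are masked off instead. E.g. (illustrative) for a
  // factor-3 group with VF == 4 whose member at index 2 is a gap,
  // createBitMaskForGaps produces <1,1,0, 1,1,0, 1,1,0, 1,1,0>.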
  Value *MaskForGaps = nullptr;
  if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
    assert(!VF.isScalable() && "scalable vectors not yet supported.");
    MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
    assert(MaskForGaps && "Mask for Gaps is required but it is null");
  }

  // Vectorize the interleaved load group.
  if (isa<LoadInst>(Instr)) {
    // For each unroll part, create a wide load for the group.
    SmallVector<Value *, 2> NewLoads;
    for (unsigned Part = 0; Part < UF; Part++) {
      Instruction *NewLoad;
      if (BlockInMask || MaskForGaps) {
        assert(useMaskedInterleavedAccesses(*TTI) &&
               "masked interleaved groups are not allowed.");
        Value *GroupMask = MaskForGaps;
        if (BlockInMask) {
          Value *BlockInMaskPart = State.get(BlockInMask, Part);
          assert(!VF.isScalable() && "scalable vectors not yet supported.");
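          // E.g. (illustrative) for InterleaveFactor == 3 and VF == 4,
          // createReplicatedMask yields <0,0,0,1,1,1,2,2,2,3,3,3>, i.e. each
          // lane's predicate bit is replicated once per group member.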
          Value *ShuffledMask = Builder.CreateShuffleVector(
              BlockInMaskPart,
              createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
              "interleaved.mask");
          GroupMask = MaskForGaps
                          ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
                                                MaskForGaps)
                          : ShuffledMask;
        }
        NewLoad =
            Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
                                     GroupMask, UndefVec, "wide.masked.vec");
      } else
        NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
                                            Group->getAlign(), "wide.vec");
      Group->addMetadata(NewLoad);
      NewLoads.push_back(NewLoad);
    }

    // For each member in the group, shuffle out the appropriate data from the
    // wide loads.
    for (unsigned I = 0; I < InterleaveFactor; ++I) {
      Instruction *Member = Group->getMember(I);

      // Skip the gaps in the group.
      if (!Member)
        continue;

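      // E.g. (illustrative) member index 1 of a factor-3 group with VF == 4
      // is extracted from the wide load with the stride mask <1, 4, 7, 10>.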
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      auto StrideMask =
          createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
      for (unsigned Part = 0; Part < UF; Part++) {
        Value *StridedVec = Builder.CreateShuffleVector(
            NewLoads[Part], StrideMask, "strided.vec");

        // If this member has a different type, cast the result to it.
        if (Member->getType() != ScalarTy) {
          assert(!VF.isScalable() && "VF is assumed to be non scalable.");
          VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
          StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
        }

        if (Group->isReverse())
          StridedVec = reverseVector(StridedVec);

        VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
      }
    }
    return;
  }

  // The sub-vector type for the current instruction.
  assert(!VF.isScalable() && "VF is assumed to be non scalable.");
  auto *SubVT = VectorType::get(ScalarTy, VF);

  // Vectorize the interleaved store group.
  for (unsigned Part = 0; Part < UF; Part++) {
    // Collect the stored vector from each member.
    SmallVector<Value *, 4> StoredVecs;
    for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow a gap, so each index must
      // have a member.
      Instruction *Member = Group->getMember(i);
      assert(Member &&
             "Failed to get a member from an interleaved store group");

      Value *StoredVec = getOrCreateVectorValue(
          cast<StoreInst>(Member)->getValueOperand(), Part);
      if (Group->isReverse())
        StoredVec = reverseVector(StoredVec);

      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
        StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);

      StoredVecs.push_back(StoredVec);
    }

    // Concatenate all vectors into a wide vector.
    Value *WideVec = concatenateVectors(Builder, StoredVecs);

    // Interleave the elements in the wide vector.
    assert(!VF.isScalable() && "scalable vectors not yet supported.");
    Value *IVec = Builder.CreateShuffleVector(
        WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
        "interleaved.vec");

    Instruction *NewStoreInstr;
    if (BlockInMask) {
      Value *BlockInMaskPart = State.get(BlockInMask, Part);
      Value *ShuffledMask = Builder.CreateShuffleVector(
          BlockInMaskPart,
          createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
          "interleaved.mask");
      NewStoreInstr = Builder.CreateMaskedStore(
          IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
    } else
      NewStoreInstr =
          Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());

    Group->addMetadata(NewStoreInstr);
  }
}

void InnerLoopVectorizer::vectorizeMemoryInstruction(
    Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr,
    VPValue *StoredValue, VPValue *BlockInMask) {
  // Attempt to issue a wide load.
  LoadInst *LI = dyn_cast<LoadInst>(Instr);
  StoreInst *SI = dyn_cast<StoreInst>(Instr);

  assert((LI || SI) && "Invalid Load/Store instruction");
  assert((!SI || StoredValue) && "No stored value provided for widened store");
  assert((!LI || !StoredValue) && "Stored value provided for widened load");

  LoopVectorizationCostModel::InstWidening Decision =
      Cost->getWideningDecision(Instr, VF);
  assert((Decision == LoopVectorizationCostModel::CM_Widen ||
          Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
          Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
         "CM decision is not to widen the memory instruction");

  Type *ScalarDataTy = getMemInstValueType(Instr);

  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  auto *DataTy = VectorType::get(ScalarDataTy, VF);
  const Align Alignment = getLoadStoreAlignment(Instr);

  // Determine if the pointer operand of the access is either consecutive or
  // reverse consecutive.
  bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
  bool ConsecutiveStride =
      Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
  bool CreateGatherScatter =
      (Decision == LoopVectorizationCostModel::CM_GatherScatter);

  // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
  // gather/scatter. Otherwise Decision should have been to Scalarize.
  assert((ConsecutiveStride || CreateGatherScatter) &&
         "The instruction should be scalarized");
  (void)ConsecutiveStride;

  VectorParts BlockInMaskParts(UF);
  bool isMaskRequired = BlockInMask;
  if (isMaskRequired)
    for (unsigned Part = 0; Part < UF; ++Part)
      BlockInMaskParts[Part] = State.get(BlockInMask, Part);

  const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
    // Calculate the pointer for the specific unroll-part.
    GetElementPtrInst *PartPtr = nullptr;

    bool InBounds = false;
    if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
      InBounds = gep->isInBounds();

    if (Reverse) {
      // If the address is consecutive but reversed, then the
      // wide load/store needs to start at the last vector element.
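      // E.g. (illustrative) with VF == 4: part 0 accesses elements Ptr[-3..0]
      // and part 1 accesses Ptr[-7..-4]; the element order within each part
      // is corrected by the reverse shuffle applied afterwards.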
      PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP(
          ScalarDataTy, Ptr, Builder.getInt32(-Part * VF.getKnownMinValue())));
      PartPtr->setIsInBounds(InBounds);
      PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP(
          ScalarDataTy, PartPtr, Builder.getInt32(1 - VF.getKnownMinValue())));
      PartPtr->setIsInBounds(InBounds);
      if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
        BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
    } else {
      PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP(
          ScalarDataTy, Ptr, Builder.getInt32(Part * VF.getKnownMinValue())));
      PartPtr->setIsInBounds(InBounds);
    }

    unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
    return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
  };

  // Handle Stores:
  if (SI) {
    setDebugLocFromInst(Builder, SI);

    for (unsigned Part = 0; Part < UF; ++Part) {
      Instruction *NewSI = nullptr;
      Value *StoredVal = State.get(StoredValue, Part);
      if (CreateGatherScatter) {
        Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
        Value *VectorGep = State.get(Addr, Part);
        NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
                                            MaskPart);
      } else {
        if (Reverse) {
          // If we store to reverse consecutive memory locations, then we need
          // to reverse the order of elements in the stored value.
          StoredVal = reverseVector(StoredVal);
          // We don't want to update the value in the map as it might be used in
          // another expression. So don't call resetVectorValue(StoredVal).
        }
        auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
        if (isMaskRequired)
          NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
                                            BlockInMaskParts[Part]);
        else
          NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
      }
      addMetadata(NewSI, SI);
    }
    return;
  }

  // Handle loads.
  assert(LI && "Must have a load instruction");
  setDebugLocFromInst(Builder, LI);
  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *NewLI;
    if (CreateGatherScatter) {
      Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
      Value *VectorGep = State.get(Addr, Part);
      NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
                                         nullptr, "wide.masked.gather");
      addMetadata(NewLI, LI);
    } else {
      auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
      if (isMaskRequired)
        NewLI = Builder.CreateMaskedLoad(
            VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy),
            "wide.masked.load");
      else
        NewLI =
            Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");

      // Add metadata to the load, but setVectorValue to the reverse shuffle.
      addMetadata(NewLI, LI);
      if (Reverse)
        NewLI = reverseVector(NewLI);
    }

    State.set(Def, Instr, NewLI, Part);
  }
}

void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPUser &User,
                                               const VPIteration &Instance,
                                               bool IfPredicateInstr,
                                               VPTransformState &State) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");

  setDebugLocFromInst(Builder, Instr);

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Instruction *Cloned = Instr->clone();
  if (!IsVoidRetTy)
    Cloned->setName(Instr->getName() + ".cloned");

  // Replace the operands of the cloned instructions with their scalar
  // equivalents in the new loop.
  for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
    auto *NewOp = State.get(User.getOperand(op), Instance);
    Cloned->setOperand(op, NewOp);
  }
  addNewMetadata(Cloned, Instr);

  // Place the cloned scalar in the new loop.
  Builder.Insert(Cloned);

  // Add the cloned scalar to the scalar map entry.
  VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);

  // If we just cloned a new assumption, add it to the assumption cache.
  if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
    if (II->getIntrinsicID() == Intrinsic::assume)
      AC->registerAssumption(II);

  // End if-block.
  if (IfPredicateInstr)
    PredicatedInstructions.push_back(Cloned);
}

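// For illustration only: assuming Start == 0 and Step == VF * UF == 8, the
// induction generated below looks roughly like (names are for exposition):
//   vector.body:
//     %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
//     ...
//     %index.next = add i64 %index, 8
//     %cmp = icmp eq i64 %index.next, %n.vec
//     br i1 %cmp, label %exit, label %vector.body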
PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
                                                      Value *End, Value *Step,
                                                      Instruction *DL) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible no latch exists
  // yet. If so, use the header as this will be a single-block loop.
  if (!Latch)
    Latch = Header;

  IRBuilder<> Builder(&*Header->getFirstInsertionPt());
  Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
  setDebugLocFromInst(Builder, OldInst);
  auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");

  Builder.SetInsertPoint(Latch->getTerminator());
  setDebugLocFromInst(Builder, OldInst);

  // Create i+1 and fill the PHINode.
  Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
  Induction->addIncoming(Start, L->getLoopPreheader());
  Induction->addIncoming(Next, Latch);
  // Create the compare.
  Value *ICmp = Builder.CreateICmpEQ(Next, End);
  Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);

  // Now we have two terminators. Remove the old one from the block.
  Latch->getTerminator()->eraseFromParent();

  return Induction;
}

Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
  if (TripCount)
    return TripCount;

  assert(L && "Create Trip Count for null loop.");
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  // Find the loop boundaries.
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
         "Invalid loop count");

  Type *IdxTy = Legal->getWidestInductionType();
  assert(IdxTy && "No type for induction");

  // The exit count might have type i64 while the phi is i32. This can happen
  // if we have an induction variable that is sign-extended before the compare.
  // The only way we can get a backedge-taken count in that case is if the
  // induction variable was signed, and as such it will not overflow, so the
  // truncation is legal.
  if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
      IdxTy->getPrimitiveSizeInBits())
    BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
  BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);

  // Get the total trip count from the backedge-taken count by adding 1.
  const SCEV *ExitCount = SE->getAddExpr(
      BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));

  const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();

  // Expand the trip count and place the new instructions in the preheader.
  // Notice that the pre-header does not change, only the loop body.
  SCEVExpander Exp(*SE, DL, "induction");

  // Count holds the overall loop count (N).
  TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
                                L->getLoopPreheader()->getTerminator());

  if (TripCount->getType()->isPointerTy())
    TripCount =
        CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
                                    L->getLoopPreheader()->getTerminator());

  return TripCount;
}

Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
  if (VectorTripCount)
    return VectorTripCount;

  Value *TC = getOrCreateTripCount(L);
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());

  Type *Ty = TC->getType();
  // This is where we can make the step a runtime constant.
  assert(!VF.isScalable() && "scalable vectorization is not supported yet");
  Constant *Step = ConstantInt::get(Ty, VF.getKnownMinValue() * UF);

  // If the tail is to be folded by masking, round the number of iterations N
  // up to a multiple of Step instead of rounding down. This is done by first
  // adding Step-1 and then rounding down. Note that it's ok if this addition
  // overflows: the vector induction variable will eventually wrap to zero given
  // that it starts at zero and its Step is a power of two; the loop will then
  // exit, with the last early-exit vector comparison also producing all-true.
  if (Cost->foldTailByMasking()) {
    assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
           "VF*UF must be a power of 2 when folding tail by masking");
    TC = Builder.CreateAdd(
        TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
  }
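
  // E.g. (illustrative) with VF * UF == 4 and N == 10 under tail folding:
  // N is first rounded up to 13, the remainder computed below is
  // 13 % 4 == 1, and the vector trip count becomes 12, i.e. three masked
  // vector iterations cover all 10 scalar iterations.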

  // Now we need to generate the expression for the part of the loop that the
  // vectorized body will execute. This is equal to N - (N % Step) if scalar
  // iterations are not required for correctness, or N - Step, otherwise. Step
  // is equal to the vectorization factor (number of SIMD elements) times the
  // unroll factor (number of SIMD instructions).
  Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");

  // If there is a non-reversed interleaved group that may speculatively access
  // memory out-of-bounds, we need to ensure that there will be at least one
  // iteration of the scalar epilogue loop. Thus, if the step evenly divides
  // the trip count, we set the remainder to be equal to the step. If the step
  // does not evenly divide the trip count, no adjustment is necessary since
  // there will already be scalar iterations. Note that the minimum iterations
  // check ensures that N >= Step.
  if (VF.isVector() && Cost->requiresScalarEpilogue()) {
    auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
    R = Builder.CreateSelect(IsZero, Step, R);
  }

  VectorTripCount = Builder.CreateSub(TC, R, "n.vec");

  return VectorTripCount;
}

Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                                   const DataLayout &DL) {
  // Verify that V is a vector type with same number of elements as DstVTy.
  auto *DstFVTy = cast<FixedVectorType>(DstVTy);
  unsigned VF = DstFVTy->getNumElements();
  auto *SrcVecTy = cast<FixedVectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
  Type *SrcElemTy = SrcVecTy->getElementType();
  Type *DstElemTy = DstFVTy->getElementType();
  assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
         "Vector elements must have same size");

  // Do a direct cast if element types are castable.
  if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
    return Builder.CreateBitOrPointerCast(V, DstFVTy);
  }
  // V cannot be directly cast to the desired vector type. This may happen when
  // V is a floating-point vector but DstVTy is a vector of pointers, or
  // vice-versa. Handle it with a two-step bitcast through an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
  assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
         "Only one type should be a pointer type");
  assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
         "Only one type should be a floating point type");
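  // E.g. (illustrative, assuming 64-bit pointers) <4 x i8*> is converted to
  // <4 x double> via a ptrtoint to <4 x i64> followed by a bitcast.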
  Type *IntTy =
      IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
  auto *VecIntTy = FixedVectorType::get(IntTy, VF);
  Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
  return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
}

void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
                                                         BasicBlock *Bypass) {
  Value *Count = getOrCreateTripCount(L);
  // Reuse existing vector loop preheader for TC checks.
  // Note that new preheader block is generated for vector loop.
  BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
  IRBuilder<> Builder(TCCheckBlock->getTerminator());

  // Generate code to check if the loop's trip count is less than VF * UF, or
  // equal to it in case a scalar epilogue is required; this implies that the
  // vector trip count is zero. This check also covers the case where adding
  // one to the backedge-taken count overflowed, leading to an incorrect trip
  // count of zero. In this case we will also jump to the scalar loop.
  auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
                                          : ICmpInst::ICMP_ULT;
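
  // E.g. (illustrative) with VF * UF == 8, we branch to the scalar loop when
  // N < 8, or when N <= 8 if a scalar epilogue is required.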

  // If tail is to be folded, vector loop takes care of all iterations.
  Value *CheckMinIters = Builder.getFalse();
  if (!Cost->foldTailByMasking()) {
    assert(!VF.isScalable() && "scalable vectors not yet supported.");
    CheckMinIters = Builder.CreateICmp(
        P, Count,
        ConstantInt::get(Count->getType(), VF.getKnownMinValue() * UF),
        "min.iters.check");
  }
  // Create new preheader for vector loop.
  LoopVectorPreHeader =
      SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
                 "vector.ph");

  assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
                               DT->getNode(Bypass)->getIDom()) &&
         "TC check is expected to dominate Bypass");

  // Update dominator for Bypass & LoopExit.
  DT->changeImmediateDominator(Bypass, TCCheckBlock);
  DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);

  ReplaceInstWithInst(
      TCCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
  LoopBypassBlocks.push_back(TCCheckBlock);
}

void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  // Reuse existing vector loop preheader for SCEV checks.
  // Note that new preheader block is generated for vector loop.
  BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader;

  // Generate the code to check the SCEV assumptions that we've made.
  // We want the new basic block to start at the first instruction in a
  // sequence of instructions that form a check.
  SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
                   "scev.check");
  Value *SCEVCheck = Exp.expandCodeForPredicate(
      &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator());

  if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
    if (C->isZero())
      return;

  assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
           (OptForSizeBasedOnProfile &&
            Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
         "Cannot SCEV check stride or overflow when optimizing for size");

  SCEVCheckBlock->setName("vector.scevcheck");
  // Create new preheader for vector loop.
  LoopVectorPreHeader =
      SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI,
                 nullptr, "vector.ph");

  // Update dominator only if this is the first RT check.
  if (LoopBypassBlocks.empty()) {
    DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
    DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
  }

  ReplaceInstWithInst(
      SCEVCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck));
  LoopBypassBlocks.push_back(SCEVCheckBlock);
  AddedSafetyChecks = true;
}

void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
  // VPlan-native path does not do any analysis for runtime checks currently.
  if (EnableVPlanNativePath)
    return;

  // Reuse existing vector loop preheader for runtime memory checks.
  // Note that new preheader block is generated for vector loop.
  BasicBlock *const MemCheckBlock = L->getLoopPreheader();

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  auto *LAI = Legal->getLAI();
  const auto &RtPtrChecking = *LAI->getRuntimePointerChecking();
  if (!RtPtrChecking.Need)
    return;

  if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
    assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
           "Cannot emit memory checks when optimizing for size, unless forced "
           "to vectorize.");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
                                        L->getStartLoc(), L->getHeader())
             << "Code-size may be reduced by not forcing "
                "vectorization, or by source-code modifications "
                "eliminating the need for runtime checks "
                "(e.g., adding 'restrict').";
    });
  }

  MemCheckBlock->setName("vector.memcheck");
  // Create new preheader for vector loop.
  LoopVectorPreHeader =
      SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr,
                 "vector.ph");

  // Update dominator only if this is the first RT check. This must happen
  // before MemCheckBlock is added to LoopBypassBlocks below; otherwise the
  // emptiness check could never succeed.
  if (LoopBypassBlocks.empty()) {
    DT->changeImmediateDominator(Bypass, MemCheckBlock);
    DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock);
  }

  Instruction *FirstCheckInst;
  Instruction *MemRuntimeCheck;
  std::tie(FirstCheckInst, MemRuntimeCheck) =
      addRuntimeChecks(MemCheckBlock->getTerminator(), OrigLoop,
                       RtPtrChecking.getChecks(), RtPtrChecking.getSE());
  assert(MemRuntimeCheck && "no RT checks generated although RtPtrChecking "
                            "claimed checks are required");

  ReplaceInstWithInst(
      MemCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheck));
  LoopBypassBlocks.push_back(MemCheckBlock);
  AddedSafetyChecks = true;

  // We currently don't use LoopVersioning for the actual loop cloning but we
  // still use it to add the noalias metadata.
  LVer = std::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
                                          PSE.getSE());
  LVer->prepareNoAliasMetadata();
}

Value *InnerLoopVectorizer::emitTransformedIndex(
    IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
    const InductionDescriptor &ID) const {

  SCEVExpander Exp(*SE, DL, "induction");
  auto Step = ID.getStep();
  auto StartValue = ID.getStartValue();
  assert(Index->getType() == Step->getType() &&
         "Index type does not match StepValue type");

  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // a more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and rely
  // on InstCombine for future simplifications. Here we handle some trivial
  // cases only.
  auto CreateAdd = [&B](Value *X, Value *Y) {
    assert(X->getType() == Y->getType() && "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isZero())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isZero())
        return X;
    return B.CreateAdd(X, Y);
  };

  auto CreateMul = [&B](Value *X, Value *Y) {
    assert(X->getType() == Y->getType() && "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isOne())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isOne())
        return X;
    return B.CreateMul(X, Y);
  };

  // Get a suitable insert point for SCEV expansion. For blocks in the vector
  // loop, choose the end of the vector loop header (=LoopVectorBody), because
  // the DomTree is not kept up-to-date for additional blocks generated in the
  // vector loop. By using the header as insertion point, we guarantee that the
  // expanded instructions dominate all their uses.
  auto GetInsertPoint = [this, &B]() {
    BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
    if (InsertBB != LoopVectorBody &&
        LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
      return LoopVectorBody->getTerminator();
    return &*B.GetInsertPoint();
  };
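
  // E.g. (illustrative) an integer induction with start value 5 and step 3
  // transforms an index i into 5 + 3 * i; a constant step of -1 folds into
  // the plain subtraction handled below.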
  switch (ID.getKind()) {
  case InductionDescriptor::IK_IntInduction: {
    assert(Index->getType() == StartValue->getType() &&
           "Index type does not match StartValue type");
    if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
      return B.CreateSub(StartValue, Index);
    auto *Offset = CreateMul(
        Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
    return CreateAdd(StartValue, Offset);
  }
  case InductionDescriptor::IK_PtrInduction: {
    assert(isa<SCEVConstant>(Step) &&
           "Expected constant step for pointer induction");
    return B.CreateGEP(
        StartValue->getType()->getPointerElementType(), StartValue,
        CreateMul(Index,
                  Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())));
  }
  case InductionDescriptor::IK_FpInduction: {
    assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
    auto InductionBinOp = ID.getInductionBinOp();
    assert(InductionBinOp &&
           (InductionBinOp->getOpcode() == Instruction::FAdd ||
            InductionBinOp->getOpcode() == Instruction::FSub) &&
           "Original bin op should be defined for FP induction");

    Value *StepValue = cast<SCEVUnknown>(Step)->getValue();

    // Floating-point operations had to be 'fast' to enable the induction.
    FastMathFlags Flags;
    Flags.setFast();

    Value *MulExp = B.CreateFMul(StepValue, Index);
    if (isa<Instruction>(MulExp))
      // We have to check because MulExp may be a constant.
      cast<Instruction>(MulExp)->setFastMathFlags(Flags);

    Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
                               "induction");
    if (isa<Instruction>(BOp))
      cast<Instruction>(BOp)->setFastMathFlags(Flags);

    return BOp;
  }
  case InductionDescriptor::IK_NoInduction:
    return nullptr;
  }
  llvm_unreachable("invalid enum");
}

Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
  LoopScalarBody = OrigLoop->getHeader();
  LoopVectorPreHeader = OrigLoop->getLoopPreheader();
  LoopExitBlock = OrigLoop->getExitBlock();
  assert(LoopExitBlock && "Must have an exit block");
  assert(LoopVectorPreHeader && "Invalid loop structure");

  LoopMiddleBlock =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, Twine(Prefix) + "middle.block");
  LoopScalarPreHeader =
      SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
                 nullptr, Twine(Prefix) + "scalar.ph");
  // We intentionally don't let SplitBlock update LoopInfo, since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
  LoopVectorBody =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 nullptr, nullptr, Twine(Prefix) + "vector.body");

  // Update dominator for loop exit.
  DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);

  // Create and register the new vector loop.
  Loop *Lp = LI->AllocateLoop();
  Loop *ParentLoop = OrigLoop->getParentLoop();

  // Insert the new loop into the loop nest and register the new basic blocks
  // before calling any utilities such as SCEV that require valid LoopInfo.
  if (ParentLoop) {
    ParentLoop->addChildLoop(Lp);
  } else {
    LI->addTopLevelLoop(Lp);
  }
  Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
  return Lp;
}

void InnerLoopVectorizer::createInductionResumeValues(Loop *L,
                                                      Value *VectorTripCount) {
  assert(VectorTripCount && L && "Expected valid arguments");
  // We are going to resume the execution of the scalar loop.
  // Go over all of the induction variables that we found and fix the
  // PHIs that are left in the scalar version of the loop.
  // The starting values of PHI nodes depend on the counter of the last
  // iteration in the vectorized loop.
  // If we come from a bypass edge then we need to start from the original
  // start value.
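  // E.g. (illustrative) for an induction 'j = 7 + 3*i', the resume value
  // entering the scalar loop is 7 + 3 * VectorTripCount when arriving from
  // the middle block, or the original start value 7 on a bypass edge.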
  for (auto &InductionEntry : Legal->getInductionVars()) {
    PHINode *OrigPhi = InductionEntry.first;
    InductionDescriptor II = InductionEntry.second;

    // Create phi nodes to merge from the backedge-taken check block.
    PHINode *BCResumeVal =
        PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
                        LoopScalarPreHeader->getTerminator());
    // Copy original phi DL over to the new one.
    BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
    Value *&EndValue = IVEndValues[OrigPhi];
    if (OrigPhi == OldInduction) {
      // We know what the end value is.
      EndValue = VectorTripCount;
    } else {
      IRBuilder<> B(L->getLoopPreheader()->getTerminator());
      Type *StepType = II.getStep()->getType();
      Instruction::CastOps CastOp =
          CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
      Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
      const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
      EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
      EndValue->setName("ind.end");
    }

    // The new PHI merges the original incoming value, in case of a bypass,
    // or the value at the end of the vectorized loop.
    BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);

    // Fix the scalar body counter (PHI node).
    // The old induction's phi node in the scalar body needs the truncated
    // value.
    for (BasicBlock *BB : LoopBypassBlocks)
      BCResumeVal->addIncoming(II.getStartValue(), BB);
    OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
  }
}

BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
                                                      MDNode *OrigLoopID) {
  assert(L && "Expected valid loop.");

  // The trip counts should be cached by now.
  Value *Count = getOrCreateTripCount(L);
  Value *VectorTripCount = getOrCreateVectorTripCount(L);

  // We need the OrigLoop (scalar loop part) latch terminator to help
  // produce correct debug info for the middle block BB instructions.
  // The legality check stage guarantees that the loop will have a single
  // latch.
  assert(isa<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()) &&
         "Scalar loop latch terminator isn't a branch");
  BranchInst *ScalarLatchBr =
      cast<BranchInst>(OrigLoop->getLoopLatch()->getTerminator());

  // Add a check in the middle block to see if we have completed
  // all of the iterations in the first vector loop.
  // If (N - N%VF) == N, then we *don't* need to run the remainder.
  // If the tail is to be folded, we know we don't need to run the remainder.
  Value *CmpN = Builder.getTrue();
  if (!Cost->foldTailByMasking()) {
    CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
                           VectorTripCount, "cmp.n",
                           LoopMiddleBlock->getTerminator());

    // Here we use the same DebugLoc as the scalar loop latch branch instead
    // of the corresponding compare because they may have ended up with
    // different line numbers and we want to avoid awkward line stepping while
    // debugging. E.g. if the compare has a line number inside the loop.
    cast<Instruction>(CmpN)->setDebugLoc(ScalarLatchBr->getDebugLoc());
  }

  BranchInst *BrInst =
      BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, CmpN);
  BrInst->setDebugLoc(ScalarLatchBr->getDebugLoc());
  ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);

  // Get ready to start creating new instructions into the vectorized body.
  assert(LoopVectorPreHeader == L->getLoopPreheader() &&
         "Inconsistent vector loop preheader");
  Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());

  Optional<MDNode *> VectorizedLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupVectorized});
  if (VectorizedLoopID.hasValue()) {
    L->setLoopID(VectorizedLoopID.getValue());

    // Do not setAlreadyVectorized if loop attributes have been defined
    // explicitly.
    return LoopVectorPreHeader;
  }

  // Keep all loop hints from the original loop on the vector loop (we'll
  // replace the vectorizer-specific hints below).
  if (MDNode *LID = OrigLoop->getLoopID())
    L->setLoopID(LID);

  LoopVectorizeHints Hints(L, true, *ORE);
  Hints.setAlreadyVectorized();

#ifdef EXPENSIVE_CHECKS
  assert(DT->verify(DominatorTree::VerificationLevel::Fast));
  LI->verify(*DT);
#endif

  return LoopVectorPreHeader;
}

BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
  /*
   In this function we generate a new loop. The new loop will contain
   the vectorized instructions while the old loop will continue to run the
   scalar remainder.

       [ ] <-- loop iteration number check.
    /   |
   /    v
  |    [ ] <-- vector loop bypass (may consist of multiple blocks).
  |  /  |
  | /   v
  ||   [ ]     <-- vector pre header.
  |/    |
  |     v
  |    [  ] \
  |    [  ]_|   <-- vector loop.
  |     |
  |     v
  |   -[ ]   <--- middle-block.
  |  /  |
  | /   v
  -|- >[ ]     <--- new preheader.
   |    |
   |    v
   |   [ ] \
   |   [ ]_|   <-- old scalar loop to handle remainder.
    \   |
     \  v
      >[ ]     <-- exit block.
   ...
   */

  // Get the metadata of the original loop before it gets modified.
  MDNode *OrigLoopID = OrigLoop->getLoopID();

  // Create an empty vector loop, and prepare basic blocks for the runtime
  // checks.
  Loop *Lp = createVectorLoopSkeleton("");

  // Now, compare the new count to zero. If it is zero skip the vector loop and
  // jump to the scalar loop. This check also covers the case where the
  // backedge-taken count is uint##_max: adding one to it will overflow, leading
  // to an incorrect trip count of zero. In this (rare) case we will also jump
  // to the scalar loop.
  emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);

  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
  emitSCEVChecks(Lp, LoopScalarPreHeader);

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  emitMemRuntimeChecks(Lp, LoopScalarPreHeader);

  // Some loops have a single integer induction variable, while other loops
  // don't. One example is c++ iterators that often have multiple pointer
  // induction variables. In the code below we also support a case where we
  // don't have a single induction variable.
  //
  // We try hard to obtain an induction variable from the original loop.
  // However, if we don't find one that:
  //   - is an integer
  //   - counts from zero, stepping by one
  //   - is the size of the widest induction variable type
  // then we create a new one.
  OldInduction = Legal->getPrimaryInduction();
  Type *IdxTy = Legal->getWidestInductionType();
  Value *StartIdx = ConstantInt::get(IdxTy, 0);
  // The loop step is equal to the vectorization factor (num of SIMD elements)
  // times the unroll factor (num of SIMD instructions).
  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // Emit phis for the new starting index of the scalar loop.
  createInductionResumeValues(Lp, CountRoundDown);

  return completeLoopSkeleton(Lp, OrigLoopID);
}

// Fix up external users of the induction variable. At this point, we are
// in LCSSA form, with all external PHIs that use the IV having one input value,
// coming from the remainder loop. We need those PHIs to also have a correct
// value for the IV when arriving directly from the middle block.
void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
                                       const InductionDescriptor &II,
                                       Value *CountRoundDown, Value *EndValue,
                                       BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the penultimate
  // value (the value that feeds into the phi from the loop latch).
  // We allow both, but they, obviously, have different values.

  assert(OrigLoop->getExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent SCEVs,
  // that is Start + (Step * (CRD - 1)).
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());
      Value *CountMinusOne = B.CreateSub(
          CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
      Value *CMO =
          !II.getStep()->getType()->isIntegerTy()
              ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
                             II.getStep()->getType())
              : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
      CMO->setName("cast.cmo");
      Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;
    }
  }

  for (auto &I : MissingVals) {
    PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is %IV2 = phi [...], [ %IV1, %latch ]
    // In this case, if IV1 has an external use, we need to avoid adding both
    // "last value of IV1" and "penultimate value of IV2". So, verify that we
    // don't already have an incoming value for the middle block.
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
      PHI->addIncoming(I.second, MiddleBlock);
  }
}

namespace {

struct CSEDenseMapInfo {
  static bool canHandle(const Instruction *I) {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  }

  static inline Instruction *getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline Instruction *getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(const Instruction *I) {
    assert(canHandle(I) && "Unknown instruction!");
    return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
                                                           I->value_op_end()));
  }

  static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return LHS == RHS;
    return LHS->isIdenticalTo(RHS);
  }
};

} // end anonymous namespace

/// Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
  SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *In = &*I++;

    if (!CSEDenseMapInfo::canHandle(In))
      continue;

    // Check if we can replace this instruction with any of the
    // visited instructions.
    if (Instruction *V = CSEMap.lookup(In)) {
      In->replaceAllUsesWith(V);
      In->eraseFromParent();
      continue;
    }

    CSEMap[In] = In;
  }
}

unsigned LoopVectorizationCostModel::getVectorCallCost(CallInst *CI,
                                                       ElementCount VF,
                                                       bool &NeedToScalarize) {
  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  Function *F = CI->getCalledFunction();
  Type *ScalarRetTy = CI->getType();
  SmallVector<Type *, 4> Tys, ScalarTys;
  for (auto &ArgOp : CI->arg_operands())
    ScalarTys.push_back(ArgOp->getType());

  // Estimate cost of scalarized vector call. The source operands are assumed
  // to be vectors, so we need to extract individual elements from there,
  // execute VF scalar calls, and then gather the result into the vector return
  // value.
  unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys,
                                                 TTI::TCK_RecipThroughput);
  if (VF.isScalar())
    return ScalarCallCost;

  // Compute corresponding vector type for return value and arguments.
  Type *RetTy = ToVectorTy(ScalarRetTy, VF);
  for (Type *ScalarTy : ScalarTys)
    Tys.push_back(ToVectorTy(ScalarTy, VF));

  // Compute costs of unpacking argument values for the scalar calls and
  // packing the return values to a vector.
  unsigned ScalarizationCost = getScalarizationOverhead(CI, VF);

  unsigned Cost = ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
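
  // E.g. (illustrative) with VF == 4, a scalar call cost of 10, and a
  // scalarization overhead of 6, the scalarized cost is 4 * 10 + 6 == 46;
  // a cheaper vector variant, if available, is preferred below.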

  // If we can't emit a vector call for this function, then the currently found
  // cost is the cost we need to return.
  NeedToScalarize = true;
  VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
  Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);

  if (!TLI || CI->isNoBuiltin() || !VecFunc)
    return Cost;

  // If the corresponding vector cost is cheaper, return its cost.
  unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys,
                                                 TTI::TCK_RecipThroughput);
  if (VectorCallCost < Cost) {
    NeedToScalarize = false;
    return VectorCallCost;
  }
  return Cost;
}

unsigned LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
                                                            ElementCount VF) {
  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  assert(ID && "Expected intrinsic call!");

  IntrinsicCostAttributes CostAttrs(ID, *CI, VF);
  return TTI.getIntrinsicInstrCost(CostAttrs,
                                   TargetTransformInfo::TCK_RecipThroughput);
}

static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
}

static Type *largestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
}

void InnerLoopVectorizer::truncateToMinimalBitwidths() {
  // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I` and reextend its result. InstCombine runs
  // later and will remove any ext/trunc pairs.
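  //
  // E.g. (illustrative) if only 8 bits of a 32-bit add are demanded:
  //   %a = add <4 x i32> %x, %y
  // becomes
  //   %x.t = trunc <4 x i32> %x to <4 x i8>   ; likewise for %y
  //   %a.t = add <4 x i8> %x.t, %y.t
  //   %a   = zext <4 x i8> %a.t to <4 x i32>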
3530   SmallPtrSet<Value *, 4> Erased;
3531   for (const auto &KV : Cost->getMinimalBitwidths()) {
3532     // If the value wasn't vectorized, we must maintain the original scalar
3533     // type. The absence of the value from VectorLoopValueMap indicates that it
3534     // wasn't vectorized.
3535     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3536       continue;
3537     for (unsigned Part = 0; Part < UF; ++Part) {
3538       Value *I = getOrCreateVectorValue(KV.first, Part);
3539       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3540         continue;
3541       Type *OriginalTy = I->getType();
3542       Type *ScalarTruncatedTy =
3543           IntegerType::get(OriginalTy->getContext(), KV.second);
3544       auto *TruncatedTy = FixedVectorType::get(
3545           ScalarTruncatedTy,
3546           cast<FixedVectorType>(OriginalTy)->getNumElements());
3547       if (TruncatedTy == OriginalTy)
3548         continue;
3549 
3550       IRBuilder<> B(cast<Instruction>(I));
3551       auto ShrinkOperand = [&](Value *V) -> Value * {
3552         if (auto *ZI = dyn_cast<ZExtInst>(V))
3553           if (ZI->getSrcTy() == TruncatedTy)
3554             return ZI->getOperand(0);
3555         return B.CreateZExtOrTrunc(V, TruncatedTy);
3556       };
3557 
3558       // The actual instruction modification depends on the instruction type,
3559       // unfortunately.
3560       Value *NewI = nullptr;
3561       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3562         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3563                              ShrinkOperand(BO->getOperand(1)));
3564 
3565         // Any wrapping introduced by shrinking this operation shouldn't be
3566         // considered undefined behavior. So, we can't unconditionally copy
3567         // arithmetic wrapping flags to NewI.
3568         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3569       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3570         NewI =
3571             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3572                          ShrinkOperand(CI->getOperand(1)));
3573       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3574         NewI = B.CreateSelect(SI->getCondition(),
3575                               ShrinkOperand(SI->getTrueValue()),
3576                               ShrinkOperand(SI->getFalseValue()));
3577       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3578         switch (CI->getOpcode()) {
3579         default:
3580           llvm_unreachable("Unhandled cast!");
3581         case Instruction::Trunc:
3582           NewI = ShrinkOperand(CI->getOperand(0));
3583           break;
3584         case Instruction::SExt:
3585           NewI = B.CreateSExtOrTrunc(
3586               CI->getOperand(0),
3587               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3588           break;
3589         case Instruction::ZExt:
3590           NewI = B.CreateZExtOrTrunc(
3591               CI->getOperand(0),
3592               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3593           break;
3594         }
3595       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3596         auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType())
3597                              ->getNumElements();
3598         auto *O0 = B.CreateZExtOrTrunc(
3599             SI->getOperand(0),
3600             FixedVectorType::get(ScalarTruncatedTy, Elements0));
3601         auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType())
3602                              ->getNumElements();
3603         auto *O1 = B.CreateZExtOrTrunc(
3604             SI->getOperand(1),
3605             FixedVectorType::get(ScalarTruncatedTy, Elements1));
3606 
3607         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3608       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3609         // Don't do anything with the operands, just extend the result.
3610         continue;
3611       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3612         auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType())
3613                             ->getNumElements();
3614         auto *O0 = B.CreateZExtOrTrunc(
3615             IE->getOperand(0),
3616             FixedVectorType::get(ScalarTruncatedTy, Elements));
3617         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3618         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3619       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3620         auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType())
3621                             ->getNumElements();
3622         auto *O0 = B.CreateZExtOrTrunc(
3623             EE->getOperand(0),
3624             FixedVectorType::get(ScalarTruncatedTy, Elements));
3625         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3626       } else {
3627         // If we don't know what to do, be conservative and don't do anything.
3628         continue;
3629       }
3630 
3631       // Lastly, extend the result.
3632       NewI->takeName(cast<Instruction>(I));
3633       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3634       I->replaceAllUsesWith(Res);
3635       cast<Instruction>(I)->eraseFromParent();
3636       Erased.insert(I);
3637       VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
3638     }
3639   }
3640 
  // We'll have created a number of ZExts that are now dead (they have no
  // uses). Clean them up.
3642   for (const auto &KV : Cost->getMinimalBitwidths()) {
3643     // If the value wasn't vectorized, we must maintain the original scalar
3644     // type. The absence of the value from VectorLoopValueMap indicates that it
3645     // wasn't vectorized.
3646     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3647       continue;
3648     for (unsigned Part = 0; Part < UF; ++Part) {
3649       Value *I = getOrCreateVectorValue(KV.first, Part);
3650       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3651       if (Inst && Inst->use_empty()) {
3652         Value *NewI = Inst->getOperand(0);
3653         Inst->eraseFromParent();
3654         VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3655       }
3656     }
3657   }
3658 }
3659 
3660 void InnerLoopVectorizer::fixVectorizedLoop() {
3661   // Insert truncates and extends for any truncated instructions as hints to
3662   // InstCombine.
3663   if (VF.isVector())
3664     truncateToMinimalBitwidths();
3665 
3666   // Fix widened non-induction PHIs by setting up the PHI operands.
3667   if (OrigPHIsToFix.size()) {
3668     assert(EnableVPlanNativePath &&
3669            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3670     fixNonInductionPHIs();
3671   }
3672 
3673   // At this point every instruction in the original loop is widened to a
3674   // vector form. Now we need to fix the recurrences in the loop. These PHI
3675   // nodes are currently empty because we did not want to introduce cycles.
3676   // This is the second stage of vectorizing recurrences.
3677   fixCrossIterationPHIs();
3678 
3679   // Forget the original basic block.
3680   PSE.getSE()->forgetLoop(OrigLoop);
3681 
3682   // Fix-up external users of the induction variables.
3683   for (auto &Entry : Legal->getInductionVars())
3684     fixupIVUsers(Entry.first, Entry.second,
3685                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3686                  IVEndValues[Entry.first], LoopMiddleBlock);
3687 
3688   fixLCSSAPHIs();
3689   for (Instruction *PI : PredicatedInstructions)
3690     sinkScalarOperands(&*PI);
3691 
3692   // Remove redundant induction instructions.
3693   cse(LoopVectorBody);
3694 
  // Set/update profile weights for the vector and remainder loops as original
  // loop iterations are now distributed among them. Note that the original
  // loop, represented by LoopScalarBody, becomes the remainder loop after
  // vectorization.
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly coarser result, but that should be OK since the
  // profile is not inherently precise anyway. Note also that any possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
3704   assert(!VF.isScalable() &&
3705          "cannot use scalable ElementCount to determine unroll factor");
3706   setProfileInfoAfterUnrolling(
3707       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
3708       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
3709 }
3710 
3711 void InnerLoopVectorizer::fixCrossIterationPHIs() {
3712   // In order to support recurrences we need to be able to vectorize Phi nodes.
3713   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3714   // stage #2: We now need to fix the recurrences by adding incoming edges to
3715   // the currently empty PHI nodes. At this point every instruction in the
3716   // original loop is widened to a vector form so we can use them to construct
3717   // the incoming edges.
3718   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
3719     // Handle first-order recurrences and reductions that need to be fixed.
3720     if (Legal->isFirstOrderRecurrence(&Phi))
3721       fixFirstOrderRecurrence(&Phi);
3722     else if (Legal->isReductionVariable(&Phi))
3723       fixReduction(&Phi);
3724   }
3725 }
3726 
3727 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3728   // This is the second phase of vectorizing first-order recurrences. An
3729   // overview of the transformation is described below. Suppose we have the
3730   // following loop.
3731   //
3732   //   for (int i = 0; i < n; ++i)
3733   //     b[i] = a[i] - a[i - 1];
3734   //
3735   // There is a first-order recurrence on "a". For this loop, the shorthand
3736   // scalar IR looks like:
3737   //
3738   //   scalar.ph:
3739   //     s_init = a[-1]
3740   //     br scalar.body
3741   //
3742   //   scalar.body:
3743   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3744   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3745   //     s2 = a[i]
3746   //     b[i] = s2 - s1
3747   //     br cond, scalar.body, ...
3748   //
  // In this example, s1 is a recurrence because its value depends on the
3750   // previous iteration. In the first phase of vectorization, we created a
3751   // temporary value for s1. We now complete the vectorization and produce the
3752   // shorthand vector IR shown below (for VF = 4, UF = 1).
3753   //
3754   //   vector.ph:
3755   //     v_init = vector(..., ..., ..., a[-1])
3756   //     br vector.body
3757   //
3758   //   vector.body
3759   //     i = phi [0, vector.ph], [i+4, vector.body]
3760   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3761   //     v2 = a[i, i+1, i+2, i+3];
3762   //     v3 = vector(v1(3), v2(0, 1, 2))
3763   //     b[i, i+1, i+2, i+3] = v2 - v3
3764   //     br cond, vector.body, middle.block
3765   //
3766   //   middle.block:
3767   //     x = v2(3)
3768   //     br scalar.ph
3769   //
3770   //   scalar.ph:
3771   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3772   //     br scalar.body
3773   //
  // After the vector loop finishes executing, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.
3776 
3777   // Get the original loop preheader and single loop latch.
3778   auto *Preheader = OrigLoop->getLoopPreheader();
3779   auto *Latch = OrigLoop->getLoopLatch();
3780 
3781   // Get the initial and previous values of the scalar recurrence.
3782   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3783   auto *Previous = Phi->getIncomingValueForBlock(Latch);
3784 
3785   // Create a vector from the initial value.
3786   auto *VectorInit = ScalarInit;
3787   if (VF.isVector()) {
3788     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3789     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
3790     VectorInit = Builder.CreateInsertElement(
3791         UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
3792         Builder.getInt32(VF.getKnownMinValue() - 1), "vector.recur.init");
3793   }
3794 
3795   // We constructed a temporary phi node in the first phase of vectorization.
3796   // This phi node will eventually be deleted.
3797   Builder.SetInsertPoint(
3798       cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
3799 
3800   // Create a phi node for the new recurrence. The current value will either be
3801   // the initial value inserted into a vector or loop-varying vector value.
3802   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
3803   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
3804 
  // Get the vectorized previous value of the last part, UF - 1. It appears
3806   // among all unrolled iterations, due to the order of their construction.
3807   Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
3808 
3809   // Find and set the insertion point after the previous value if it is an
3810   // instruction.
3811   BasicBlock::iterator InsertPt;
3812   // Note that the previous value may have been constant-folded so it is not
3813   // guaranteed to be an instruction in the vector loop.
3814   // FIXME: Loop invariant values do not form recurrences. We should deal with
3815   //        them earlier.
3816   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
3817     InsertPt = LoopVectorBody->getFirstInsertionPt();
3818   else {
3819     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
3820     if (isa<PHINode>(PreviousLastPart))
3821       // If the previous value is a phi node, we should insert after all the phi
3822       // nodes in the block containing the PHI to avoid breaking basic block
      // verification. Note that the basic block may be different from
3824       // LoopVectorBody, in case we predicate the loop.
3825       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
3826     else
3827       InsertPt = ++PreviousInst->getIterator();
3828   }
3829   Builder.SetInsertPoint(&*InsertPt);
3830 
3831   // We will construct a vector for the recurrence by combining the values for
3832   // the current and previous iterations. This is the required shuffle mask.
3833   assert(!VF.isScalable());
3834   SmallVector<int, 8> ShuffleMask(VF.getKnownMinValue());
3835   ShuffleMask[0] = VF.getKnownMinValue() - 1;
3836   for (unsigned I = 1; I < VF.getKnownMinValue(); ++I)
3837     ShuffleMask[I] = I + VF.getKnownMinValue() - 1;
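  // For example, with VF = 4 the mask is <3, 4, 5, 6>: lane 3 of Incoming
  // followed by lanes 0, 1 and 2 of the previous value's vector, matching
  // "v3 = vector(v1(3), v2(0, 1, 2))" in the example above.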
3838 
3839   // The vector from which to take the initial value for the current iteration
3840   // (actual or unrolled). Initially, this is the vector phi node.
3841   Value *Incoming = VecPhi;
3842 
3843   // Shuffle the current and previous vector and update the vector parts.
3844   for (unsigned Part = 0; Part < UF; ++Part) {
3845     Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
3846     Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
3847     auto *Shuffle =
3848         VF.isVector()
3849             ? Builder.CreateShuffleVector(Incoming, PreviousPart, ShuffleMask)
3850             : Incoming;
3851     PhiPart->replaceAllUsesWith(Shuffle);
3852     cast<Instruction>(PhiPart)->eraseFromParent();
3853     VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
3854     Incoming = PreviousPart;
3855   }
3856 
3857   // Fix the latch value of the new recurrence in the vector loop.
3858   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3859 
3860   // Extract the last vector element in the middle block. This will be the
3861   // initial value for the recurrence when jumping to the scalar loop.
3862   auto *ExtractForScalar = Incoming;
3863   if (VF.isVector()) {
3864     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3865     ExtractForScalar = Builder.CreateExtractElement(
3866         ExtractForScalar, Builder.getInt32(VF.getKnownMinValue() - 1),
3867         "vector.recur.extract");
3868   }
  // Extract the second-to-last element in the middle block if the
3870   // Phi is used outside the loop. We need to extract the phi itself
3871   // and not the last element (the phi update in the current iteration). This
3872   // will be the value when jumping to the exit block from the LoopMiddleBlock,
3873   // when the scalar loop is not run at all.
3874   Value *ExtractForPhiUsedOutsideLoop = nullptr;
3875   if (VF.isVector())
3876     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3877         Incoming, Builder.getInt32(VF.getKnownMinValue() - 2),
3878         "vector.recur.extract.for.phi");
  // When the loop is unrolled without vectorizing, initialize
  // ExtractForPhiUsedOutsideLoop with the unrolled value just prior to
  // `Incoming`. This is analogous to the vectorized case above: extracting
  // the second-to-last element when VF > 1.
3883   else if (UF > 1)
3884     ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
3885 
3886   // Fix the initial value of the original recurrence in the scalar loop.
3887   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3888   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3889   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3890     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3891     Start->addIncoming(Incoming, BB);
3892   }
3893 
3894   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3895   Phi->setName("scalar.recur");
3896 
3897   // Finally, fix users of the recurrence outside the loop. The users will need
3898   // either the last value of the scalar recurrence or the last value of the
3899   // vector recurrence we extracted in the middle block. Since the loop is in
3900   // LCSSA form, we just need to find all the phi nodes for the original scalar
3901   // recurrence in the exit block, and then add an edge for the middle block.
3902   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3903     if (LCSSAPhi.getIncomingValue(0) == Phi) {
3904       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3905     }
3906   }
3907 }
3908 
3909 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
3910   Constant *Zero = Builder.getInt32(0);
3911 
  // Get its reduction variable descriptor.
3913   assert(Legal->isReductionVariable(Phi) &&
3914          "Unable to find the reduction variable");
3915   RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
3916 
3917   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3918   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3919   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3920   RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3921     RdxDesc.getMinMaxRecurrenceKind();
3922   setDebugLocFromInst(Builder, ReductionStartValue);
3923   bool IsInLoopReductionPhi = Cost->isInLoopReduction(Phi);
3924 
  // We need to generate a reduction vector from the incoming scalar.
  // To do so, we need to generate the 'identity' vector and overwrite
  // one of the elements with the incoming scalar reduction. We need
  // to do it in the vector-loop preheader.
3929   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3930 
3931   // This is the vector-clone of the value that leaves the loop.
3932   Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
3933 
  // Find the reduction identity value: zero for addition, or, and xor; one
  // for multiplication; -1 (all ones) for and.
3936   Value *Identity;
3937   Value *VectorStart;
3938   if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3939       RK == RecurrenceDescriptor::RK_FloatMinMax) {
    // MinMax reductions have the start value as their identity.
3941     if (VF.isScalar() || IsInLoopReductionPhi) {
3942       VectorStart = Identity = ReductionStartValue;
3943     } else {
3944       VectorStart = Identity =
3945         Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3946     }
3947   } else {
3948     // Handle other reduction kinds:
3949     Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3950         RK, MinMaxKind, VecTy->getScalarType());
3951     if (VF.isScalar() || IsInLoopReductionPhi) {
3952       Identity = Iden;
3953       // This vector is the Identity vector where the first element is the
3954       // incoming scalar reduction.
3955       VectorStart = ReductionStartValue;
3956     } else {
3957       Identity = ConstantVector::getSplat(VF, Iden);
3958 
3959       // This vector is the Identity vector where the first element is the
3960       // incoming scalar reduction.
3961       VectorStart =
3962         Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3963     }
3964   }
3965 
  // Wrap flags are in general invalid after vectorization, so clear them.
3967   clearReductionWrapFlags(RdxDesc);
3968 
3969   // Fix the vector-loop phi.
3970 
3971   // Reductions do not have to start at zero. They can start with
3972   // any loop invariant values.
3973   BasicBlock *Latch = OrigLoop->getLoopLatch();
3974   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3975 
3976   for (unsigned Part = 0; Part < UF; ++Part) {
3977     Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
3978     Value *Val = getOrCreateVectorValue(LoopVal, Part);
3979     // Make sure to add the reduction start value only to the
3980     // first unroll part.
3981     Value *StartVal = (Part == 0) ? VectorStart : Identity;
3982     cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
3983     cast<PHINode>(VecRdxPhi)
3984       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3985   }
3986 
3987   // Before each round, move the insertion point right between
3988   // the PHIs and the values we are going to write.
3989   // This allows us to write both PHINodes and the extractelement
3990   // instructions.
3991   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3992 
3993   setDebugLocFromInst(Builder, LoopExitInst);
3994 
3995   // If tail is folded by masking, the vector value to leave the loop should be
3996   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
3997   // instead of the former. For an inloop reduction the reduction will already
3998   // be predicated, and does not need to be handled here.
3999   if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) {
4000     for (unsigned Part = 0; Part < UF; ++Part) {
4001       Value *VecLoopExitInst =
4002           VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
4003       Value *Sel = nullptr;
4004       for (User *U : VecLoopExitInst->users()) {
4005         if (isa<SelectInst>(U)) {
4006           assert(!Sel && "Reduction exit feeding two selects");
4007           Sel = U;
4008         } else
4009           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4010       }
4011       assert(Sel && "Reduction exit feeds no select");
4012       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel);
4013 
4014       // If the target can create a predicated operator for the reduction at no
4015       // extra cost in the loop (for example a predicated vadd), it can be
4016       // cheaper for the select to remain in the loop than be sunk out of it,
4017       // and so use the select value for the phi instead of the old
4018       // LoopExitValue.
4019       RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
4020       if (PreferPredicatedReductionSelect ||
4021           TTI->preferPredicatedReductionSelect(
4022               RdxDesc.getRecurrenceBinOp(RdxDesc.getRecurrenceKind()),
4023               Phi->getType(), TargetTransformInfo::ReductionFlags())) {
4024         auto *VecRdxPhi = cast<PHINode>(getOrCreateVectorValue(Phi, Part));
4025         VecRdxPhi->setIncomingValueForBlock(
4026             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4027       }
4028     }
4029   }
4030 
4031   // If the vector reduction can be performed in a smaller type, we truncate
4032   // then extend the loop exit value to enable InstCombine to evaluate the
4033   // entire expression in the smaller type.
4034   if (VF.isVector() && Phi->getType() != RdxDesc.getRecurrenceType()) {
4035     assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!");
4036     assert(!VF.isScalable() && "scalable vectors not yet supported.");
4037     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4038     Builder.SetInsertPoint(
4039         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4040     VectorParts RdxParts(UF);
4041     for (unsigned Part = 0; Part < UF; ++Part) {
4042       RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
4043       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4044       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4045                                         : Builder.CreateZExt(Trunc, VecTy);
4046       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
4047            UI != RdxParts[Part]->user_end();)
4048         if (*UI != Trunc) {
4049           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
4050           RdxParts[Part] = Extnd;
4051         } else {
4052           ++UI;
4053         }
4054     }
4055     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4056     for (unsigned Part = 0; Part < UF; ++Part) {
4057       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4058       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
4059     }
4060   }
4061 
4062   // Reduce all of the unrolled parts into a single vector.
4063   Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
4064   unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
4065 
4066   // The middle block terminator has already been assigned a DebugLoc here (the
4067   // OrigLoop's single latch terminator). We want the whole middle block to
4068   // appear to execute on this line because: (a) it is all compiler generated,
4069   // (b) these instructions are always executed after evaluating the latch
4070   // conditional branch, and (c) other passes may add new predecessors which
4071   // terminate on this line. This is the easiest way to ensure we don't
4072   // accidentally cause an extra step back into the loop while debugging.
4073   setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
4074   for (unsigned Part = 1; Part < UF; ++Part) {
4075     Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
4076     if (Op != Instruction::ICmp && Op != Instruction::FCmp)
4077       // Floating point operations had to be 'fast' to enable the reduction.
4078       ReducedPartRdx = addFastMathFlag(
4079           Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart,
4080                               ReducedPartRdx, "bin.rdx"),
4081           RdxDesc.getFastMathFlags());
4082     else
4083       ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx,
4084                                       RdxPart);
4085   }
4086 
  // Create the reduction after the loop. Note that inloop reductions create
  // the target reduction in the loop using a Reduction recipe.
4089   if (VF.isVector() && !IsInLoopReductionPhi) {
4090     bool NoNaN = Legal->hasFunNoNaNAttr();
4091     ReducedPartRdx =
4092         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN);
4093     // If the reduction can be performed in a smaller type, we need to extend
4094     // the reduction to the wider type before we branch to the original loop.
4095     if (Phi->getType() != RdxDesc.getRecurrenceType())
4096       ReducedPartRdx =
4097         RdxDesc.isSigned()
4098         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
4099         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
4100   }
4101 
4102   // Create a phi node that merges control-flow from the backedge-taken check
4103   // block and the middle block.
4104   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
4105                                         LoopScalarPreHeader->getTerminator());
4106   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4107     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4108   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4109 
4110   // Now, we need to fix the users of the reduction variable
4111   // inside and outside of the scalar remainder loop.
4112   // We know that the loop is in LCSSA form. We need to update the
4113   // PHI nodes in the exit blocks.
4114   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4115     // All PHINodes need to have a single entry edge, or two if
4116     // we already fixed them.
4117     assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
4118 
4119     // We found a reduction value exit-PHI. Update it with the
4120     // incoming bypass edge.
4121     if (LCSSAPhi.getIncomingValue(0) == LoopExitInst)
4122       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4123   } // end of the LCSSA phi scan.
4124 
  // Fix the scalar loop reduction variable with the incoming reduction sum
  // from the vector body and from the backedge value.
4127   int IncomingEdgeBlockIdx =
4128     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4129   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4130   // Pick the other block.
4131   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4132   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4133   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4134 }
4135 
4136 void InnerLoopVectorizer::clearReductionWrapFlags(
4137     RecurrenceDescriptor &RdxDesc) {
4138   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
4139   if (RK != RecurrenceDescriptor::RK_IntegerAdd &&
4140       RK != RecurrenceDescriptor::RK_IntegerMult)
4141     return;
4142 
4143   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4144   assert(LoopExitInstr && "null loop exit instruction");
4145   SmallVector<Instruction *, 8> Worklist;
4146   SmallPtrSet<Instruction *, 8> Visited;
4147   Worklist.push_back(LoopExitInstr);
4148   Visited.insert(LoopExitInstr);
4149 
4150   while (!Worklist.empty()) {
4151     Instruction *Cur = Worklist.pop_back_val();
4152     if (isa<OverflowingBinaryOperator>(Cur))
4153       for (unsigned Part = 0; Part < UF; ++Part) {
4154         Value *V = getOrCreateVectorValue(Cur, Part);
4155         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4156       }
4157 
4158     for (User *U : Cur->users()) {
4159       Instruction *UI = cast<Instruction>(U);
4160       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4161           Visited.insert(UI).second)
4162         Worklist.push_back(UI);
4163     }
4164   }
4165 }
4166 
4167 void InnerLoopVectorizer::fixLCSSAPHIs() {
4168   assert(!VF.isScalable() && "the code below assumes fixed width vectors");
4169   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4170     if (LCSSAPhi.getNumIncomingValues() == 1) {
4171       auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
      // Non-instruction incoming values have only one value; use lane zero.
4173       unsigned LastLane = 0;
4174       if (isa<Instruction>(IncomingValue))
4175         LastLane = Cost->isUniformAfterVectorization(
4176                        cast<Instruction>(IncomingValue), VF)
4177                        ? 0
4178                        : VF.getKnownMinValue() - 1;
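      // For example, with VF = 4 and UF = 2, a non-uniform incoming value is
      // taken from part 1, lane 3 (the last scalar produced).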
4179       // Can be a loop invariant incoming value or the last scalar value to be
4180       // extracted from the vectorized loop.
4181       Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4182       Value *lastIncomingValue =
4183           getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane });
4184       LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4185     }
4186   }
4187 }
4188 
4189 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4190   // The basic block and loop containing the predicated instruction.
4191   auto *PredBB = PredInst->getParent();
4192   auto *VectorLoop = LI->getLoopFor(PredBB);
4193 
4194   // Initialize a worklist with the operands of the predicated instruction.
4195   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4196 
4197   // Holds instructions that we need to analyze again. An instruction may be
4198   // reanalyzed if we don't yet know if we can sink it or not.
4199   SmallVector<Instruction *, 8> InstsToReanalyze;
4200 
4201   // Returns true if a given use occurs in the predicated block. Phi nodes use
4202   // their operands in their corresponding predecessor blocks.
4203   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4204     auto *I = cast<Instruction>(U.getUser());
4205     BasicBlock *BB = I->getParent();
4206     if (auto *Phi = dyn_cast<PHINode>(I))
4207       BB = Phi->getIncomingBlock(
4208           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4209     return BB == PredBB;
4210   };
4211 
  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends after one
  // pass through the worklist doesn't sink a single instruction.
4216   bool Changed;
4217   do {
4218     // Add the instructions that need to be reanalyzed to the worklist, and
4219     // reset the changed indicator.
4220     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4221     InstsToReanalyze.clear();
4222     Changed = false;
4223 
4224     while (!Worklist.empty()) {
4225       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4226 
4227       // We can't sink an instruction if it is a phi node, is already in the
4228       // predicated block, is not in the loop, or may have side effects.
4229       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4230           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4231         continue;
4232 
4233       // It's legal to sink the instruction if all its uses occur in the
4234       // predicated block. Otherwise, there's nothing to do yet, and we may
4235       // need to reanalyze the instruction.
4236       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4237         InstsToReanalyze.push_back(I);
4238         continue;
4239       }
4240 
      // Move the instruction to the beginning of the predicated block, and
      // add its operands to the worklist.
4243       I->moveBefore(&*PredBB->getFirstInsertionPt());
4244       Worklist.insert(I->op_begin(), I->op_end());
4245 
4246       // The sinking may have enabled other instructions to be sunk, so we will
4247       // need to iterate.
4248       Changed = true;
4249     }
4250   } while (Changed);
4251 }
4252 
4253 void InnerLoopVectorizer::fixNonInductionPHIs() {
4254   for (PHINode *OrigPhi : OrigPHIsToFix) {
4255     PHINode *NewPhi =
4256         cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0));
4257     unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();
4258 
4259     SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
4260         predecessors(OrigPhi->getParent()));
4261     SmallVector<BasicBlock *, 2> VectorBBPredecessors(
4262         predecessors(NewPhi->getParent()));
4263     assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
4264            "Scalar and Vector BB should have the same number of predecessors");
4265 
4266     // The insertion point in Builder may be invalidated by the time we get
4267     // here. Force the Builder insertion point to something valid so that we do
4268     // not run into issues during insertion point restore in
4269     // getOrCreateVectorValue calls below.
4270     Builder.SetInsertPoint(NewPhi);
4271 
4272     // The predecessor order is preserved and we can rely on mapping between
4273     // scalar and vector block predecessors.
4274     for (unsigned i = 0; i < NumIncomingValues; ++i) {
4275       BasicBlock *NewPredBB = VectorBBPredecessors[i];
4276 
4277       // When looking up the new scalar/vector values to fix up, use incoming
4278       // values from original phi.
4279       Value *ScIncV =
4280           OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]);
4281 
4282       // Scalar incoming value may need a broadcast
4283       Value *NewIncV = getOrCreateVectorValue(ScIncV, 0);
4284       NewPhi->addIncoming(NewIncV, NewPredBB);
4285     }
4286   }
4287 }
4288 
4289 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPUser &Operands,
4290                                    unsigned UF, ElementCount VF,
4291                                    bool IsPtrLoopInvariant,
4292                                    SmallBitVector &IsIndexLoopInvariant,
4293                                    VPTransformState &State) {
4294   // Construct a vector GEP by widening the operands of the scalar GEP as
4295   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4296   // results in a vector of pointers when at least one operand of the GEP
4297   // is vector-typed. Thus, to keep the representation compact, we only use
4298   // vector-typed operands for loop-varying values.
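  // As illustrative shorthand (not exact IR): a scalar "gep %base, %iv" with
  // a loop-varying index %iv becomes "gep %base, <%iv.0, ..., %iv.VF-1>",
  // producing a vector of VF pointers.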
4299 
4300   if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4301     // If we are vectorizing, but the GEP has only loop-invariant operands,
4302     // the GEP we build (by only using vector-typed operands for
4303     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4304     // produce a vector of pointers, we need to either arbitrarily pick an
4305     // operand to broadcast, or broadcast a clone of the original GEP.
4306     // Here, we broadcast a clone of the original.
4307     //
4308     // TODO: If at some point we decide to scalarize instructions having
4309     //       loop-invariant operands, this special case will no longer be
4310     //       required. We would add the scalarization decision to
4311     //       collectLoopScalars() and teach getVectorValue() to broadcast
4312     //       the lane-zero scalar value.
4313     auto *Clone = Builder.Insert(GEP->clone());
4314     for (unsigned Part = 0; Part < UF; ++Part) {
4315       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4316       VectorLoopValueMap.setVectorValue(GEP, Part, EntryPart);
4317       addMetadata(EntryPart, GEP);
4318     }
4319   } else {
4320     // If the GEP has at least one loop-varying operand, we are sure to
4321     // produce a vector of pointers. But if we are only unrolling, we want
4322     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4323     // produce with the code below will be scalar (if VF == 1) or vector
4324     // (otherwise). Note that for the unroll-only case, we still maintain
4325     // values in the vector mapping with initVector, as we do for other
4326     // instructions.
4327     for (unsigned Part = 0; Part < UF; ++Part) {
4328       // The pointer operand of the new GEP. If it's loop-invariant, we
4329       // won't broadcast it.
4330       auto *Ptr = IsPtrLoopInvariant ? State.get(Operands.getOperand(0), {0, 0})
4331                                      : State.get(Operands.getOperand(0), Part);
4332 
4333       // Collect all the indices for the new GEP. If any index is
4334       // loop-invariant, we won't broadcast it.
4335       SmallVector<Value *, 4> Indices;
4336       for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4337         VPValue *Operand = Operands.getOperand(I);
4338         if (IsIndexLoopInvariant[I - 1])
4339           Indices.push_back(State.get(Operand, {0, 0}));
4340         else
4341           Indices.push_back(State.get(Operand, Part));
4342       }
4343 
4344       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4345       // but it should be a vector, otherwise.
4346       auto *NewGEP =
4347           GEP->isInBounds()
4348               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4349                                           Indices)
4350               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4351       assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
4352              "NewGEP is not a pointer vector");
4353       VectorLoopValueMap.setVectorValue(GEP, Part, NewGEP);
4354       addMetadata(NewGEP, GEP);
4355     }
4356   }
4357 }
4358 
4359 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
4360                                               ElementCount VF) {
4361   assert(!VF.isScalable() && "scalable vectors not yet supported.");
4362   PHINode *P = cast<PHINode>(PN);
4363   if (EnableVPlanNativePath) {
4364     // Currently we enter here in the VPlan-native path for non-induction
4365     // PHIs where all control flow is uniform. We simply widen these PHIs.
4366     // Create a vector phi with no operands - the vector phi operands will be
4367     // set at the end of vector code generation.
4368     Type *VecTy =
4369         (VF.isScalar()) ? PN->getType() : VectorType::get(PN->getType(), VF);
4370     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4371     VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
4372     OrigPHIsToFix.push_back(P);
4373 
4374     return;
4375   }
4376 
4377   assert(PN->getParent() == OrigLoop->getHeader() &&
4378          "Non-header phis should have been handled elsewhere");
4379 
4380   // In order to support recurrences we need to be able to vectorize Phi nodes.
4381   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4382   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4383   // this value when we vectorize all of the instructions that use the PHI.
4384   if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4385     for (unsigned Part = 0; Part < UF; ++Part) {
4386       // This is phase one of vectorizing PHIs.
4387       bool ScalarPHI =
4388           (VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN));
4389       Type *VecTy =
4390           ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), VF);
4391       Value *EntryPart = PHINode::Create(
4392           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4393       VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
4394     }
4395     return;
4396   }
4397 
4398   setDebugLocFromInst(Builder, P);
4399 
4400   // This PHINode must be an induction variable.
4401   // Make sure that we know about it.
4402   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4403 
4404   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4405   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4406 
4407   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4408   // which can be found from the original scalar operations.
4409   switch (II.getKind()) {
4410   case InductionDescriptor::IK_NoInduction:
4411     llvm_unreachable("Unknown induction");
4412   case InductionDescriptor::IK_IntInduction:
4413   case InductionDescriptor::IK_FpInduction:
4414     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4415   case InductionDescriptor::IK_PtrInduction: {
4416     // Handle the pointer induction variable case.
4417     assert(P->getType()->isPointerTy() && "Unexpected type.");
4418 
4419     if (Cost->isScalarAfterVectorization(P, VF)) {
4420       // This is the normalized GEP that starts counting at zero.
4421       Value *PtrInd =
4422           Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
4423       // Determine the number of scalars we need to generate for each unroll
4424       // iteration. If the instruction is uniform, we only need to generate the
4425       // first lane. Otherwise, we generate all VF values.
4426       unsigned Lanes =
4427           Cost->isUniformAfterVectorization(P, VF) ? 1 : VF.getKnownMinValue();
4428       for (unsigned Part = 0; Part < UF; ++Part) {
4429         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4430           Constant *Idx = ConstantInt::get(PtrInd->getType(),
4431                                            Lane + Part * VF.getKnownMinValue());
4432           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4433           Value *SclrGep =
4434               emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4435           SclrGep->setName("next.gep");
4436           VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
4437         }
4438       }
4439       return;
4440     }
4441     assert(isa<SCEVConstant>(II.getStep()) &&
4442            "Induction step not a SCEV constant!");
4443     Type *PhiType = II.getStep()->getType();
4444 
4445     // Build a pointer phi
4446     Value *ScalarStartValue = II.getStartValue();
4447     Type *ScStValueType = ScalarStartValue->getType();
4448     PHINode *NewPointerPhi =
4449         PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
4450     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4451 
4452     // A pointer induction, performed by using a gep
4453     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4454     Instruction *InductionLoc = LoopLatch->getTerminator();
4455     const SCEV *ScalarStep = II.getStep();
4456     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4457     Value *ScalarStepValue =
4458         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4459     Value *InductionGEP = GetElementPtrInst::Create(
4460         ScStValueType->getPointerElementType(), NewPointerPhi,
4461         Builder.CreateMul(
4462             ScalarStepValue,
4463             ConstantInt::get(PhiType, VF.getKnownMinValue() * UF)),
4464         "ptr.ind", InductionLoc);
4465     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4466 
4467     // Create UF many actual address geps that use the pointer
4468     // phi as base and a vectorized version of the step value
4469     // (<step*0, ..., step*N>) as offset.
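    // For example, with VF = 4, UF = 2 and a unit step, part 0 uses offsets
    // <0, 1, 2, 3> and part 1 uses <4, 5, 6, 7>, while the pointer phi itself
    // is advanced by step * 8 per vector iteration above.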
4470     for (unsigned Part = 0; Part < UF; ++Part) {
4471       SmallVector<Constant *, 8> Indices;
4472       // Create a vector of consecutive numbers from zero to VF.
4473       for (unsigned i = 0; i < VF.getKnownMinValue(); ++i)
4474         Indices.push_back(
4475             ConstantInt::get(PhiType, i + Part * VF.getKnownMinValue()));
4476       Constant *StartOffset = ConstantVector::get(Indices);
4477 
4478       Value *GEP = Builder.CreateGEP(
4479           ScStValueType->getPointerElementType(), NewPointerPhi,
4480           Builder.CreateMul(
4481               StartOffset,
4482               Builder.CreateVectorSplat(VF.getKnownMinValue(), ScalarStepValue),
4483               "vector.gep"));
4484       VectorLoopValueMap.setVectorValue(P, Part, GEP);
4485     }
4486   }
4487   }
4488 }
4489 
4490 /// A helper function for checking whether an integer division-related
4491 /// instruction may divide by zero (in which case it must be predicated if
4492 /// executed conditionally in the scalar code).
4493 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
4495 /// converted into multiplication, so we will still end up scalarizing
4496 /// the division, but can do so w/o predication.
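/// For example, "udiv i32 %x, 7" cannot divide by zero, whereas
/// "udiv i32 %x, %y" with a loop-varying %y may, and so must be predicated
/// when executed conditionally.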
4497 static bool mayDivideByZero(Instruction &I) {
4498   assert((I.getOpcode() == Instruction::UDiv ||
4499           I.getOpcode() == Instruction::SDiv ||
4500           I.getOpcode() == Instruction::URem ||
4501           I.getOpcode() == Instruction::SRem) &&
4502          "Unexpected instruction");
4503   Value *Divisor = I.getOperand(1);
4504   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4505   return !CInt || CInt->isZero();
4506 }
4507 
4508 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPUser &User,
4509                                            VPTransformState &State) {
4510   assert(!VF.isScalable() && "scalable vectors not yet supported.");
4511   switch (I.getOpcode()) {
4512   case Instruction::Call:
4513   case Instruction::Br:
4514   case Instruction::PHI:
4515   case Instruction::GetElementPtr:
4516   case Instruction::Select:
4517     llvm_unreachable("This instruction is handled by a different recipe.");
4518   case Instruction::UDiv:
4519   case Instruction::SDiv:
4520   case Instruction::SRem:
4521   case Instruction::URem:
4522   case Instruction::Add:
4523   case Instruction::FAdd:
4524   case Instruction::Sub:
4525   case Instruction::FSub:
4526   case Instruction::FNeg:
4527   case Instruction::Mul:
4528   case Instruction::FMul:
4529   case Instruction::FDiv:
4530   case Instruction::FRem:
4531   case Instruction::Shl:
4532   case Instruction::LShr:
4533   case Instruction::AShr:
4534   case Instruction::And:
4535   case Instruction::Or:
4536   case Instruction::Xor: {
4537     // Just widen unops and binops.
4538     setDebugLocFromInst(Builder, &I);
4539 
4540     for (unsigned Part = 0; Part < UF; ++Part) {
4541       SmallVector<Value *, 2> Ops;
4542       for (VPValue *VPOp : User.operands())
4543         Ops.push_back(State.get(VPOp, Part));
4544 
4545       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4546 
4547       if (auto *VecOp = dyn_cast<Instruction>(V))
4548         VecOp->copyIRFlags(&I);
4549 
4550       // Use this vector value for all users of the original instruction.
4551       VectorLoopValueMap.setVectorValue(&I, Part, V);
4552       addMetadata(V, &I);
4553     }
4554 
4555     break;
4556   }
4557   case Instruction::ICmp:
4558   case Instruction::FCmp: {
4559     // Widen compares. Generate vector compares.
4560     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4561     auto *Cmp = cast<CmpInst>(&I);
4562     setDebugLocFromInst(Builder, Cmp);
4563     for (unsigned Part = 0; Part < UF; ++Part) {
4564       Value *A = State.get(User.getOperand(0), Part);
4565       Value *B = State.get(User.getOperand(1), Part);
4566       Value *C = nullptr;
4567       if (FCmp) {
4568         // Propagate fast math flags.
4569         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4570         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4571         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4572       } else {
4573         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4574       }
4575       VectorLoopValueMap.setVectorValue(&I, Part, C);
4576       addMetadata(C, &I);
4577     }
4578 
4579     break;
4580   }
4581 
4582   case Instruction::ZExt:
4583   case Instruction::SExt:
4584   case Instruction::FPToUI:
4585   case Instruction::FPToSI:
4586   case Instruction::FPExt:
4587   case Instruction::PtrToInt:
4588   case Instruction::IntToPtr:
4589   case Instruction::SIToFP:
4590   case Instruction::UIToFP:
4591   case Instruction::Trunc:
4592   case Instruction::FPTrunc:
4593   case Instruction::BitCast: {
4594     auto *CI = cast<CastInst>(&I);
4595     setDebugLocFromInst(Builder, CI);
4596 
    // Vectorize casts.
4598     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
4599     Type *DestTy =
4600         (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF);
4601 
4602     for (unsigned Part = 0; Part < UF; ++Part) {
4603       Value *A = State.get(User.getOperand(0), Part);
4604       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4605       VectorLoopValueMap.setVectorValue(&I, Part, Cast);
4606       addMetadata(Cast, &I);
4607     }
4608     break;
4609   }
4610   default:
4611     // This instruction is not vectorized by simple widening.
4612     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4613     llvm_unreachable("Unhandled instruction!");
4614   } // end of switch.
4615 }
4616 
4617 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPUser &ArgOperands,
4618                                                VPTransformState &State) {
4619   assert(!isa<DbgInfoIntrinsic>(I) &&
4620          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4621   setDebugLocFromInst(Builder, &I);
4622 
4623   Module *M = I.getParent()->getParent()->getParent();
4624   auto *CI = cast<CallInst>(&I);
4625 
4626   SmallVector<Type *, 4> Tys;
4627   for (Value *ArgOperand : CI->arg_operands())
4628     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4629 
4630   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4631 
  // This flag shows whether we should use an intrinsic or a plain call for
  // the vectorized version of the instruction, i.e., whether the intrinsic
  // call is cheaper than the library call.
4635   bool NeedToScalarize = false;
4636   unsigned CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4637   bool UseVectorIntrinsic =
4638       ID && Cost->getVectorIntrinsicCost(CI, VF) <= CallCost;
4639   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4640          "Instruction should be scalarized elsewhere.");
4641 
4642   for (unsigned Part = 0; Part < UF; ++Part) {
4643     SmallVector<Value *, 4> Args;
4644     for (auto &I : enumerate(ArgOperands.operands())) {
4645       // Some intrinsics have a scalar argument - don't replace it with a
4646       // vector.
4647       Value *Arg;
4648       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4649         Arg = State.get(I.value(), Part);
4650       else
4651         Arg = State.get(I.value(), {0, 0});
4652       Args.push_back(Arg);
4653     }
4654 
4655     Function *VectorF;
4656     if (UseVectorIntrinsic) {
4657       // Use vector version of the intrinsic.
4658       Type *TysForDecl[] = {CI->getType()};
4659       if (VF.isVector()) {
4660         assert(!VF.isScalable() && "VF is assumed to be non scalable.");
4661         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4662       }
4663       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4664       assert(VectorF && "Can't retrieve vector intrinsic.");
4665     } else {
4666       // Use vector version of the function call.
4667       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4668 #ifndef NDEBUG
4669       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4670              "Can't create vector function.");
4671 #endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    VectorLoopValueMap.setVectorValue(&I, Part, V);
    addMetadata(V, &I);
4683   }
4684 }
4685 
4686 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I,
4687                                                  VPUser &Operands,
4688                                                  bool InvariantCond,
4689                                                  VPTransformState &State) {
4690   setDebugLocFromInst(Builder, &I);
4691 
  // The condition can be loop invariant but still defined inside the
  // loop. This means that we can't just use the original 'cond' value.
  // We have to take the 'vectorized' value and pick the first lane.
  // InstCombine will make this a no-op.
4696   auto *InvarCond =
4697       InvariantCond ? State.get(Operands.getOperand(0), {0, 0}) : nullptr;
4698 
4699   for (unsigned Part = 0; Part < UF; ++Part) {
4700     Value *Cond =
4701         InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
4702     Value *Op0 = State.get(Operands.getOperand(1), Part);
4703     Value *Op1 = State.get(Operands.getOperand(2), Part);
4704     Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
4705     VectorLoopValueMap.setVectorValue(&I, Part, Sel);
4706     addMetadata(Sel, &I);
4707   }
4708 }
4709 
4710 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
4711   // We should not collect Scalars more than once per VF. Right now, this
4712   // function is called from collectUniformsAndScalars(), which already does
4713   // this check. Collecting Scalars for VF=1 does not make any sense.
4714   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
4715          "This function should not be visited twice for the same VF");
4716 
4717   SmallSetVector<Instruction *, 8> Worklist;
4718 
4719   // These sets are used to seed the analysis with pointers used by memory
4720   // accesses that will remain scalar.
4721   SmallSetVector<Instruction *, 8> ScalarPtrs;
4722   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4723   auto *Latch = TheLoop->getLoopLatch();
4724 
4725   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4726   // The pointer operands of loads and stores will be scalar as long as the
4727   // memory access is not a gather or scatter operation. The value operand of a
4728   // store will remain scalar if the store is scalarized.
4729   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4730     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4731     assert(WideningDecision != CM_Unknown &&
4732            "Widening decision should be ready at this moment");
4733     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4734       if (Ptr == Store->getValueOperand())
4735         return WideningDecision == CM_Scalarize;
4736     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4737            "Ptr is neither a value or pointer operand");
4738     return WideningDecision != CM_GatherScatter;
4739   };
4740 
4741   // A helper that returns true if the given value is a bitcast or
4742   // getelementptr instruction contained in the loop.
4743   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4744     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4745             isa<GetElementPtrInst>(V)) &&
4746            !TheLoop->isLoopInvariant(V);
4747   };
4748 
4749   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
4750     if (!isa<PHINode>(Ptr) ||
4751         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
4752       return false;
4753     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
4754     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
4755       return false;
4756     return isScalarUse(MemAccess, Ptr);
4757   };
4758 
4759   // A helper that evaluates a memory access's use of a pointer. If the
4760   // pointer is actually the pointer induction of a loop, it is being
4761   // inserted into Worklist. If the use will be a scalar use, and the
4762   // pointer is only used by memory accesses, we place the pointer in
4763   // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs.
4764   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4765     if (isScalarPtrInduction(MemAccess, Ptr)) {
4766       Worklist.insert(cast<Instruction>(Ptr));
4767       Instruction *Update = cast<Instruction>(
4768           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
4769       Worklist.insert(Update);
4770       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
4771                         << "\n");
4772       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
4773                         << "\n");
4774       return;
4775     }
4776     // We only care about bitcast and getelementptr instructions contained in
4777     // the loop.
4778     if (!isLoopVaryingBitCastOrGEP(Ptr))
4779       return;
4780 
4781     // If the pointer has already been identified as scalar (e.g., if it was
4782     // also identified as uniform), there's nothing to do.
4783     auto *I = cast<Instruction>(Ptr);
4784     if (Worklist.count(I))
4785       return;
4786 
4787     // If the use of the pointer will be a scalar use, and all users of the
4788     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4789     // place the pointer in PossibleNonScalarPtrs.
4790     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4791           return isa<LoadInst>(U) || isa<StoreInst>(U);
4792         }))
4793       ScalarPtrs.insert(I);
4794     else
4795       PossibleNonScalarPtrs.insert(I);
4796   };
4797 
  // We seed the scalars analysis with three classes of instructions: (1)
  // instructions marked uniform-after-vectorization, (2) bitcast,
  // getelementptr and (pointer) phi instructions used by memory accesses
  // requiring a scalar use, and (3) instructions forced to be scalar by
  // the cost model for this VF.
4802   //
4803   // (1) Add to the worklist all instructions that have been identified as
4804   // uniform-after-vectorization.
4805   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4806 
4807   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4808   // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
4810   // scatter operation. The value operand of a store will remain scalar if the
4811   // store is scalarized.
4812   for (auto *BB : TheLoop->blocks())
4813     for (auto &I : *BB) {
4814       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4815         evaluatePtrUse(Load, Load->getPointerOperand());
4816       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4817         evaluatePtrUse(Store, Store->getPointerOperand());
4818         evaluatePtrUse(Store, Store->getValueOperand());
4819       }
4820     }
4821   for (auto *I : ScalarPtrs)
4822     if (!PossibleNonScalarPtrs.count(I)) {
4823       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4824       Worklist.insert(I);
4825     }
4826 
  // (3) Insert the forced scalars.
4828   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4829   // induction variable when the PHI user is scalarized.
4830   auto ForcedScalar = ForcedScalars.find(VF);
4831   if (ForcedScalar != ForcedScalars.end())
4832     for (auto *I : ForcedScalar->second)
4833       Worklist.insert(I);
4834 
4835   // Expand the worklist by looking through any bitcasts and getelementptr
4836   // instructions we've already identified as scalar. This is similar to the
4837   // expansion step in collectLoopUniforms(); however, here we're only
4838   // expanding to include additional bitcasts and getelementptr instructions.
4839   unsigned Idx = 0;
4840   while (Idx != Worklist.size()) {
4841     Instruction *Dst = Worklist[Idx++];
4842     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4843       continue;
4844     auto *Src = cast<Instruction>(Dst->getOperand(0));
4845     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4846           auto *J = cast<Instruction>(U);
4847           return !TheLoop->contains(J) || Worklist.count(J) ||
4848                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4849                   isScalarUse(J, Src));
4850         })) {
4851       Worklist.insert(Src);
4852       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4853     }
4854   }
4855 
4856   // An induction variable will remain scalar if all users of the induction
4857   // variable and induction variable update remain scalar.
4858   for (auto &Induction : Legal->getInductionVars()) {
4859     auto *Ind = Induction.first;
4860     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4861 
4862     // If tail-folding is applied, the primary induction variable will be used
4863     // to feed a vector compare.
4864     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4865       continue;
4866 
4867     // Determine if all users of the induction variable are scalar after
4868     // vectorization.
4869     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4870       auto *I = cast<Instruction>(U);
4871       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
4872     });
4873     if (!ScalarInd)
4874       continue;
4875 
4876     // Determine if all users of the induction variable update instruction are
4877     // scalar after vectorization.
4878     auto ScalarIndUpdate =
4879         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4880           auto *I = cast<Instruction>(U);
4881           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
4882         });
4883     if (!ScalarIndUpdate)
4884       continue;
4885 
4886     // The induction variable and its update instruction will remain scalar.
4887     Worklist.insert(Ind);
4888     Worklist.insert(IndUpdate);
4889     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4890     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4891                       << "\n");
4892   }
4893 
4894   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4895 }
4896 
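// For example, a division inside an if-converted block must be kept scalar
// and predicated when its divisor may be zero, since executing it
// speculatively on all lanes could trap:
//
//   for (int i = 0; i < n; ++i)
//     if (b[i] != 0)
//       a[i] = x / b[i];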
4897 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I,
4898                                                          ElementCount VF) {
4899   assert(!VF.isScalable() && "scalable vectors not yet supported.");
4900   if (!blockNeedsPredication(I->getParent()))
4901     return false;
  switch (I->getOpcode()) {
4903   default:
4904     break;
4905   case Instruction::Load:
4906   case Instruction::Store: {
4907     if (!Legal->isMaskRequired(I))
4908       return false;
4909     auto *Ptr = getLoadStorePointerOperand(I);
4910     auto *Ty = getMemInstValueType(I);
4911     // We have already decided how to vectorize this instruction, get that
4912     // result.
4913     if (VF.isVector()) {
4914       InstWidening WideningDecision = getWideningDecision(I, VF);
4915       assert(WideningDecision != CM_Unknown &&
4916              "Widening decision should be ready at this moment");
4917       return WideningDecision == CM_Scalarize;
4918     }
4919     const Align Alignment = getLoadStoreAlignment(I);
4920     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4921                                 isLegalMaskedGather(Ty, Alignment))
4922                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4923                                 isLegalMaskedScatter(Ty, Alignment));
4924   }
4925   case Instruction::UDiv:
4926   case Instruction::SDiv:
4927   case Instruction::SRem:
4928   case Instruction::URem:
4929     return mayDivideByZero(*I);
4930   }
4931   return false;
4932 }
4933 
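// For example, an interleave group built for accesses such as:
//
//   for (int i = 0; i < n; ++i)
//     sum += A[2 * i]; // A[2 * i + 1] is never read: the group has a gap
//
// may read past the last element in its final vector iteration, so it needs
// either a scalar epilogue or masking of the excess (gap) lanes.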
4934 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
4935     Instruction *I, ElementCount VF) {
4936   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4937   assert(getWideningDecision(I, VF) == CM_Unknown &&
4938          "Decision should not be set yet.");
4939   auto *Group = getInterleavedAccessGroup(I);
4940   assert(Group && "Must have a group.");
4941 
  // If the instruction's allocated size doesn't equal its type size, it
4943   // requires padding and will be scalarized.
4944   auto &DL = I->getModule()->getDataLayout();
4945   auto *ScalarTy = getMemInstValueType(I);
4946   if (hasIrregularType(ScalarTy, DL, VF))
4947     return false;
4948 
4949   // Check if masking is required.
4950   // A Group may need masking for one of two reasons: it resides in a block that
4951   // needs predication, or it was decided to use masking to deal with gaps.
4952   bool PredicatedAccessRequiresMasking =
4953       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
4954   bool AccessWithGapsRequiresMasking =
4955       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
4956   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
4957     return true;
4958 
4959   // If masked interleaving is required, we expect that the user/target had
4960   // enabled it, because otherwise it either wouldn't have been created or
4961   // it should have been invalidated by the CostModel.
4962   assert(useMaskedInterleavedAccesses(TTI) &&
4963          "Masked interleave-groups for predicated accesses are not enabled.");
4964 
4965   auto *Ty = getMemInstValueType(I);
4966   const Align Alignment = getLoadStoreAlignment(I);
4967   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4968                           : TTI.isLegalMaskedStore(Ty, Alignment);
4969 }
4970 
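// For example, a load of A[i] with a unit-stride induction is consecutive
// and may be widened into a single wide load, whereas a load of A[2 * i] is
// not consecutive and would instead be handled as a gather or through an
// interleave group.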
4971 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
4972     Instruction *I, ElementCount VF) {
4973   // Get and ensure we have a valid memory instruction.
4974   LoadInst *LI = dyn_cast<LoadInst>(I);
4975   StoreInst *SI = dyn_cast<StoreInst>(I);
4976   assert((LI || SI) && "Invalid memory instruction");
4977 
4978   auto *Ptr = getLoadStorePointerOperand(I);
4979 
4980   // In order to be widened, the pointer should be consecutive, first of all.
4981   if (!Legal->isConsecutivePtr(Ptr))
4982     return false;
4983 
4984   // If the instruction is a store located in a predicated block, it will be
4985   // scalarized.
4986   if (isScalarWithPredication(I))
4987     return false;
4988 
  // If the instruction's allocated size doesn't equal its type size, it
4990   // requires padding and will be scalarized.
4991   auto &DL = I->getModule()->getDataLayout();
4992   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
4993   if (hasIrregularType(ScalarTy, DL, VF))
4994     return false;
4995 
4996   return true;
4997 }
4998 
4999 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
5000   // We should not collect Uniforms more than once per VF. Right now,
5001   // this function is called from collectUniformsAndScalars(), which
5002   // already does this check. Collecting Uniforms for VF=1 does not make any
5003   // sense.
5004 
5005   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5006          "This function should not be visited twice for the same VF");
5007 
  // Initialize the entry for VF so that even if no uniform value is found,
  // we will not analyze this VF again; Uniforms.count(VF) will return 1.
5010   Uniforms[VF].clear();
5011 
5012   // We now know that the loop is vectorizable!
5013   // Collect instructions inside the loop that will remain uniform after
5014   // vectorization.
5015 
  // Global values, params and instructions outside of the current loop are
  // out of scope.
5018   auto isOutOfScope = [&](Value *V) -> bool {
5019     Instruction *I = dyn_cast<Instruction>(V);
5020     return (!I || !TheLoop->contains(I));
5021   };
5022 
5023   SetVector<Instruction *> Worklist;
5024   BasicBlock *Latch = TheLoop->getLoopLatch();
5025 
5026   // Instructions that are scalar with predication must not be considered
5027   // uniform after vectorization, because that would create an erroneous
5028   // replicating region where only a single instance out of VF should be formed.
5029   // TODO: optimize such seldom cases if found important, see PR40816.
5030   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5031     if (isScalarWithPredication(I, VF)) {
5032       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5033                         << *I << "\n");
5034       return;
5035     }
5036     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5037     Worklist.insert(I);
5038   };
5039 
5040   // Start with the conditional branch. If the branch condition is an
5041   // instruction contained in the loop that is only used by the branch, it is
5042   // uniform.
5043   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5044   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5045     addToWorklistIfAllowed(Cmp);
5046 
5047   // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
5048   // are pointers that are treated like consecutive pointers during
5049   // vectorization. The pointer operands of interleaved accesses are an
5050   // example.
5051   SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
5052 
5053   // Holds pointer operands of instructions that are possibly non-uniform.
5054   SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
5055 
5056   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5057     InstWidening WideningDecision = getWideningDecision(I, VF);
5058     assert(WideningDecision != CM_Unknown &&
5059            "Widening decision should be ready at this moment");
5060 
5061     return (WideningDecision == CM_Widen ||
5062             WideningDecision == CM_Widen_Reverse ||
5063             WideningDecision == CM_Interleave);
5064   };
5065   // Iterate over the instructions in the loop, and collect all
5066   // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
5067   // that a consecutive-like pointer operand will be scalarized, we collect it
5068   // in PossibleNonUniformPtrs instead. We use two sets here because a single
5069   // getelementptr instruction can be used by both vectorized and scalarized
5070   // memory instructions. For example, if a loop loads and stores from the same
5071   // location, but the store is conditional, the store will be scalarized, and
5072   // the getelementptr won't remain uniform.
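  //
  // For example (in pseudo-IR):
  //
  //   %gep = getelementptr i32, i32* %A, i64 %i
  //   %v   = load i32, i32* %gep   ; widened: a uniform use of %gep
  //   ...
  //   store i32 %v2, i32* %gep     ; scalarized conditional store: %gep is
  //                                ; placed in PossibleNonUniformPtrs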
5073   for (auto *BB : TheLoop->blocks())
5074     for (auto &I : *BB) {
5075       // If there's no pointer operand, there's nothing to do.
5076       auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
5077       if (!Ptr)
5078         continue;
5079 
5080       // True if all users of Ptr are memory accesses that have Ptr as their
5081       // pointer operand.
5082       auto UsersAreMemAccesses =
5083           llvm::all_of(Ptr->users(), [&](User *U) -> bool {
5084             return getLoadStorePointerOperand(U) == Ptr;
5085           });
5086 
5087       // Ensure the memory instruction will not be scalarized or used by
5088       // gather/scatter, making its pointer operand non-uniform. If the pointer
5089       // operand is used by any instruction other than a memory access, we
5090       // conservatively assume the pointer operand may be non-uniform.
5091       if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
5092         PossibleNonUniformPtrs.insert(Ptr);
5093 
5094       // If the memory instruction will be vectorized and its pointer operand
5095       // is consecutive-like, or interleaving - the pointer operand should
5096       // remain uniform.
5097       else
5098         ConsecutiveLikePtrs.insert(Ptr);
5099     }
5100 
5101   // Add to the Worklist all consecutive and consecutive-like pointers that
5102   // aren't also identified as possibly non-uniform.
5103   for (auto *V : ConsecutiveLikePtrs)
5104     if (!PossibleNonUniformPtrs.count(V))
5105       addToWorklistIfAllowed(V);
5106 
  // Expand Worklist in topological order: whenever a new instruction is
  // added, its users should already be inside Worklist. This ensures that a
  // uniform instruction will only be used by uniform instructions.
5110   unsigned idx = 0;
5111   while (idx != Worklist.size()) {
5112     Instruction *I = Worklist[idx++];
5113 
5114     for (auto OV : I->operand_values()) {
5115       // isOutOfScope operands cannot be uniform instructions.
5116       if (isOutOfScope(OV))
5117         continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
5120       auto *OP = dyn_cast<PHINode>(OV);
5121       if (OP && Legal->isFirstOrderRecurrence(OP))
5122         continue;
5123       // If all the users of the operand are uniform, then add the
5124       // operand into the uniform worklist.
5125       auto *OI = cast<Instruction>(OV);
5126       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5127             auto *J = cast<Instruction>(U);
5128             return Worklist.count(J) ||
5129                    (OI == getLoadStorePointerOperand(J) &&
5130                     isUniformDecision(J, VF));
5131           }))
5132         addToWorklistIfAllowed(OI);
5133     }
5134   }
5135 
5136   // Returns true if Ptr is the pointer operand of a memory access instruction
5137   // I, and I is known to not require scalarization.
5138   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5139     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5140   };
5141 
5142   // For an instruction to be added into Worklist above, all its users inside
5143   // the loop should also be in Worklist. However, this condition cannot be
5144   // true for phi nodes that form a cyclic dependence. We must process phi
5145   // nodes separately. An induction variable will remain uniform if all users
5146   // of the induction variable and induction variable update remain uniform.
5147   // The code below handles both pointer and non-pointer induction variables.
5148   for (auto &Induction : Legal->getInductionVars()) {
5149     auto *Ind = Induction.first;
5150     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5151 
5152     // Determine if all users of the induction variable are uniform after
5153     // vectorization.
5154     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5155       auto *I = cast<Instruction>(U);
5156       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5157              isVectorizedMemAccessUse(I, Ind);
5158     });
5159     if (!UniformInd)
5160       continue;
5161 
5162     // Determine if all users of the induction variable update instruction are
5163     // uniform after vectorization.
5164     auto UniformIndUpdate =
5165         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5166           auto *I = cast<Instruction>(U);
5167           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5168                  isVectorizedMemAccessUse(I, IndUpdate);
5169         });
5170     if (!UniformIndUpdate)
5171       continue;
5172 
5173     // The induction variable and its update instruction will remain uniform.
5174     addToWorklistIfAllowed(Ind);
5175     addToWorklistIfAllowed(IndUpdate);
5176   }
5177 
5178   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5179 }
5180 
5181 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5182   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5183 
5184   if (Legal->getRuntimePointerChecking()->Need) {
5185     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5186         "runtime pointer checks needed. Enable vectorization of this "
5187         "loop with '#pragma clang loop vectorize(enable)' when "
5188         "compiling with -Os/-Oz",
5189         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5190     return true;
5191   }
5192 
5193   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5194     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5195         "runtime SCEV checks needed. Enable vectorization of this "
5196         "loop with '#pragma clang loop vectorize(enable)' when "
5197         "compiling with -Os/-Oz",
5198         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5199     return true;
5200   }
5201 
5202   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5203   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5204     reportVectorizationFailure("Runtime stride check for small trip count",
5205         "runtime stride == 1 checks needed. Enable vectorization of "
5206         "this loop without such check by compiling with -Os/-Oz",
5207         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5208     return true;
5209   }
5210 
5211   return false;
5212 }
5213 
5214 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(unsigned UserVF,
5215                                                             unsigned UserIC) {
5216   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to insert the runtime check anyway, since its
    // result is still likely to be dynamically uniform if the target can
    // exploit that.
5219     reportVectorizationFailure(
5220         "Not inserting runtime ptr check for divergent target",
5221         "runtime pointer checks needed. Not enabled for divergent target",
5222         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5223     return None;
5224   }
5225 
5226   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5227   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5228   if (TC == 1) {
5229     reportVectorizationFailure("Single iteration (non) loop",
5230         "loop trip count is one, irrelevant for vectorization",
5231         "SingleIterationLoop", ORE, TheLoop);
5232     return None;
5233   }
5234 
5235   switch (ScalarEpilogueStatus) {
5236   case CM_ScalarEpilogueAllowed:
5237     return UserVF ? UserVF : computeFeasibleMaxVF(TC);
5238   case CM_ScalarEpilogueNotNeededUsePredicate:
5239     LLVM_DEBUG(
5240         dbgs() << "LV: vector predicate hint/switch found.\n"
5241                << "LV: Not allowing scalar epilogue, creating predicated "
5242                << "vector loop.\n");
5243     break;
5244   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5245     // fallthrough as a special case of OptForSize
5246   case CM_ScalarEpilogueNotAllowedOptSize:
5247     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5248       LLVM_DEBUG(
5249           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5250     else
5251       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5252                         << "count.\n");
5253 
5254     // Bail if runtime checks are required, which are not good when optimising
5255     // for size.
5256     if (runtimeChecksRequired())
5257       return None;
5258     break;
5259   }
5260 
  // Now try to fold the tail by masking.
5262 
5263   // Invalidate interleave groups that require an epilogue if we can't mask
5264   // the interleave-group.
5265   if (!useMaskedInterleavedAccesses(TTI)) {
5266     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5267            "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
5270     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5271   }
5272 
5273   unsigned MaxVF = UserVF ? UserVF : computeFeasibleMaxVF(TC);
5274   assert((UserVF || isPowerOf2_32(MaxVF)) && "MaxVF must be a power of 2");
5275   unsigned MaxVFtimesIC = UserIC ? MaxVF * UserIC : MaxVF;
5276   if (TC > 0 && TC % MaxVFtimesIC == 0) {
5277     // Accept MaxVF if we do not have a tail.
5278     LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5279     return MaxVF;
5280   }
5281 
5282   // If we don't know the precise trip count, or if the trip count that we
5283   // found modulo the vectorization factor is not zero, try to fold the tail
5284   // by masking.
5285   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5286   if (Legal->prepareToFoldTailByMasking()) {
5287     FoldTailByMasking = true;
5288     return MaxVF;
5289   }
5290 
5291   // If there was a tail-folding hint/switch, but we can't fold the tail by
5292   // masking, fallback to a vectorization with a scalar epilogue.
5293   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
    if (PreferPredicateOverEpilogue ==
        PreferPredicateTy::PredicateOrDontVectorize) {
5295       LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5296       return None;
5297     }
5298     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5299                          "scalar epilogue instead.\n");
5300     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5301     return MaxVF;
5302   }
5303 
5304   if (TC == 0) {
5305     reportVectorizationFailure(
5306         "Unable to calculate the loop count due to complex control flow",
5307         "unable to calculate the loop count due to complex control flow",
5308         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5309     return None;
5310   }
5311 
5312   reportVectorizationFailure(
5313       "Cannot optimize for size and vectorize at the same time.",
5314       "cannot optimize for size and vectorize at the same time. "
5315       "Enable vectorization of this loop with '#pragma clang loop "
5316       "vectorize(enable)' when compiling with -Os/-Oz",
5317       "NoTailLoopWithOptForSize", ORE, TheLoop);
5318   return None;
5319 }
5320 
5321 unsigned
5322 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount) {
5323   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5324   unsigned SmallestType, WidestType;
5325   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5326   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
5327 
5328   // Get the maximum safe dependence distance in bits computed by LAA.
5329   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
5331   // dependence distance).
5332   unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();
5333 
5334   WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);
5335 
5336   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
5338   unsigned MaxVectorSize = PowerOf2Floor(WidestRegister / WidestType);
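
  // For example, a 128-bit widest register and a 32-bit widest type give
  // MaxVectorSize = 4; were the widest type 24 bits, 128 / 24 = 5 would be
  // rounded down to the power of two 4.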
5339 
5340   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5341                     << " / " << WidestType << " bits.\n");
5342   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5343                     << WidestRegister << " bits.\n");
5344 
5345   assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements"
5346                                  " into one vector!");
5347   if (MaxVectorSize == 0) {
5348     LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
5349     MaxVectorSize = 1;
5350     return MaxVectorSize;
5351   } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
5352              isPowerOf2_32(ConstTripCount)) {
5353     // We need to clamp the VF to be the ConstTripCount. There is no point in
5354     // choosing a higher viable VF as done in the loop below.
5355     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5356                       << ConstTripCount << "\n");
5357     MaxVectorSize = ConstTripCount;
5358     return MaxVectorSize;
5359   }
5360 
5361   unsigned MaxVF = MaxVectorSize;
5362   if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
5363       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5364     // Collect all viable vectorization factors larger than the default MaxVF
5365     // (i.e. MaxVectorSize).
5366     SmallVector<ElementCount, 8> VFs;
5367     unsigned NewMaxVectorSize = WidestRegister / SmallestType;
5368     for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
5369       VFs.push_back(ElementCount::getFixed(VS));
5370 
5371     // For each VF calculate its register usage.
5372     auto RUs = calculateRegisterUsage(VFs);
5373 
5374     // Select the largest VF which doesn't require more registers than existing
5375     // ones.
5376     for (int i = RUs.size() - 1; i >= 0; --i) {
5377       bool Selected = true;
5378       for (auto& pair : RUs[i].MaxLocalUsers) {
5379         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5380         if (pair.second > TargetNumRegisters)
5381           Selected = false;
5382       }
5383       if (Selected) {
5384         MaxVF = VFs[i].getKnownMinValue();
5385         break;
5386       }
5387     }
5388     if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
5389       if (MaxVF < MinVF) {
5390         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5391                           << ") with target's minimum: " << MinVF << '\n');
5392         MaxVF = MinVF;
5393       }
5394     }
5395   }
5396   return MaxVF;
5397 }
5398 
5399 VectorizationFactor
5400 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
5401   float Cost = expectedCost(ElementCount::getFixed(1)).first;
5402   const float ScalarCost = Cost;
5403   unsigned Width = 1;
5404   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
5405 
5406   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5407   if (ForceVectorization && MaxVF > 1) {
5408     // Ignore scalar width, because the user explicitly wants vectorization.
5409     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5410     // evaluation.
5411     Cost = std::numeric_limits<float>::max();
5412   }
5413 
5414   for (unsigned i = 2; i <= MaxVF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
5417     // the vector elements.
5418     VectorizationCostTy C = expectedCost(ElementCount::getFixed(i));
5419     float VectorCost = C.first / (float)i;
5420     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5421                       << " costs: " << (int)VectorCost << ".\n");
5422     if (!C.second && !ForceVectorization) {
5423       LLVM_DEBUG(
5424           dbgs() << "LV: Not considering vector loop of width " << i
5425                  << " because it will not generate any vector instructions.\n");
5426       continue;
5427     }
5428     if (VectorCost < Cost) {
5429       Cost = VectorCost;
5430       Width = i;
5431     }
5432   }
5433 
5434   if (!EnableCondStoresVectorization && NumPredStores) {
5435     reportVectorizationFailure("There are conditional stores.",
5436         "store that is conditionally executed prevents vectorization",
5437         "ConditionalStore", ORE, TheLoop);
5438     Width = 1;
5439     Cost = ScalarCost;
5440   }
5441 
5442   LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
5443              << "LV: Vectorization seems to be not beneficial, "
5444              << "but was forced by a user.\n");
5445   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5446   VectorizationFactor Factor = {ElementCount::getFixed(Width),
5447                                 (unsigned)(Width * Cost)};
5448   return Factor;
5449 }
5450 
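// For example, a loop that loads i8 values and accumulates them into an i32
// reduction yields {8, 32}: the loads contribute the smallest type and the
// reduction phi's recurrence type contributes the widest.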
5451 std::pair<unsigned, unsigned>
5452 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5453   unsigned MinWidth = -1U;
5454   unsigned MaxWidth = 8;
5455   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5456 
5457   // For each block.
5458   for (BasicBlock *BB : TheLoop->blocks()) {
5459     // For each instruction in the loop.
5460     for (Instruction &I : BB->instructionsWithoutDebug()) {
5461       Type *T = I.getType();
5462 
5463       // Skip ignored values.
5464       if (ValuesToIgnore.count(&I))
5465         continue;
5466 
5467       // Only examine Loads, Stores and PHINodes.
5468       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5469         continue;
5470 
5471       // Examine PHI nodes that are reduction variables. Update the type to
5472       // account for the recurrence type.
5473       if (auto *PN = dyn_cast<PHINode>(&I)) {
5474         if (!Legal->isReductionVariable(PN))
5475           continue;
5476         RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
5477         T = RdxDesc.getRecurrenceType();
5478       }
5479 
5480       // Examine the stored values.
5481       if (auto *ST = dyn_cast<StoreInst>(&I))
5482         T = ST->getValueOperand()->getType();
5483 
5484       // Ignore loaded pointer types and stored pointer types that are not
5485       // vectorizable.
5486       //
5487       // FIXME: The check here attempts to predict whether a load or store will
5488       //        be vectorized. We only know this for certain after a VF has
5489       //        been selected. Here, we assume that if an access can be
5490       //        vectorized, it will be. We should also look at extending this
5491       //        optimization to non-pointer types.
5492       //
5493       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
5494           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
5495         continue;
5496 
5497       MinWidth = std::min(MinWidth,
5498                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5499       MaxWidth = std::max(MaxWidth,
5500                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5501     }
5502   }
5503 
5504   return {MinWidth, MaxWidth};
5505 }
5506 
5507 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
5508                                                            unsigned LoopCost) {
5509   // -- The interleave heuristics --
5510   // We interleave the loop in order to expose ILP and reduce the loop overhead.
5511   // There are many micro-architectural considerations that we can't predict
5512   // at this level. For example, frontend pressure (on decode or fetch) due to
5513   // code size, or the number and capabilities of the execution ports.
5514   //
5515   // We use the following heuristics to select the interleave count:
5516   // 1. If the code has reductions, then we interleave to break the cross
5517   // iteration dependency.
5518   // 2. If the loop is really small, then we interleave to reduce the loop
5519   // overhead.
5520   // 3. We don't interleave if we think that we will spill registers to memory
5521   // due to the increased register pressure.
5522 
5523   if (!isScalarEpilogueAllowed())
5524     return 1;
5525 
  // If there is a finite maximum safe dependence distance, it was already
  // used to bound the vectorization factor; interleaving on top of that
  // could violate the dependence, so do not interleave.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
5528     return 1;
5529 
5530   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
5531   const bool HasReductions = !Legal->getReductionVars().empty();
5532   // Do not interleave loops with a relatively small known or estimated trip
5533   // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled and the code has scalar reductions (HasReductions && VF == 1),
5535   // because with the above conditions interleaving can expose ILP and break
5536   // cross iteration dependences for reductions.
5537   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
5538       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
5539     return 1;
5540 
5541   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these counts below, so clamp each to at least one register
  // to avoid dividing by zero.
5544   for (auto& pair : R.MaxLocalUsers) {
5545     pair.second = std::max(pair.second, 1U);
5546   }
5547 
5548   // We calculate the interleave count using the following formula.
5549   // Subtract the number of loop invariants from the number of available
5550   // registers. These registers are used by all of the interleaved instances.
5551   // Next, divide the remaining registers by the number of registers that is
5552   // required by the loop, in order to estimate how many parallel instances
5553   // fit without causing spills. All of this is rounded down if necessary to be
5554   // a power of two. We want power of two interleave count to simplify any
5555   // addressing operations or alignment considerations.
5556   // We also want power of two interleave counts to ensure that the induction
5557   // variable of the vector loop wraps to zero, when tail is folded by masking;
  // this currently happens when optimizing for size, in which case we return
  // an interleave count of 1 above.
5559   unsigned IC = UINT_MAX;
5560 
5561   for (auto& pair : R.MaxLocalUsers) {
5562     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5563     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
5564                       << " registers of "
5565                       << TTI.getRegisterClassName(pair.first) << " register class\n");
5566     if (VF.isScalar()) {
5567       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5568         TargetNumRegisters = ForceTargetNumScalarRegs;
5569     } else {
5570       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5571         TargetNumRegisters = ForceTargetNumVectorRegs;
5572     }
5573     unsigned MaxLocalUsers = pair.second;
5574     unsigned LoopInvariantRegs = 0;
5575     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
5576       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
5577 
    unsigned TmpIC =
        PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
5579     // Don't count the induction variable as interleaved.
5580     if (EnableIndVarRegisterHeur) {
5581       TmpIC =
5582           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
5583                         std::max(1U, (MaxLocalUsers - 1)));
5584     }
5585 
5586     IC = std::min(IC, TmpIC);
5587   }
5588 
5589   // Clamp the interleave ranges to reasonable counts.
5590   assert(!VF.isScalable() && "scalable vectors not yet supported.");
5591   unsigned MaxInterleaveCount =
5592       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
5593 
5594   // Check if the user has overridden the max.
5595   if (VF.isScalar()) {
5596     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5597       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5598   } else {
5599     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5600       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5601   }
5602 
  // If the trip count is a known or estimated compile-time constant, limit
  // the interleave count to at most the trip count divided by VF, provided
  // the result is at least 1.
5606   if (BestKnownTC) {
5607     MaxInterleaveCount =
5608         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
5609     // Make sure MaxInterleaveCount is greater than 0.
5610     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
5611   }
5612 
5613   assert(MaxInterleaveCount > 0 &&
5614          "Maximum interleave count must be greater than 0");
5615 
  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target and trip count allow.
5618   if (IC > MaxInterleaveCount)
5619     IC = MaxInterleaveCount;
5620   else
5621     // Make sure IC is greater than 0.
5622     IC = std::max(1u, IC);
5623 
5624   assert(IC > 0 && "Interleave count must be greater than 0.");
5625 
5626   // If we did not calculate the cost for VF (because the user selected the VF)
5627   // then we calculate the cost of VF here.
5628   if (LoopCost == 0)
5629     LoopCost = expectedCost(VF).first;
5630 
5631   assert(LoopCost && "Non-zero loop cost expected");
5632 
5633   // Interleave if we vectorized this loop and there is a reduction that could
5634   // benefit from interleaving.
5635   if (VF.isVector() && HasReductions) {
5636     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5637     return IC;
5638   }
5639 
5640   // Note that if we've already vectorized the loop we will have done the
5641   // runtime check and so interleaving won't require further checks.
5642   bool InterleavingRequiresRuntimePointerCheck =
5643       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
5644 
5645   // We want to interleave small loops in order to reduce the loop overhead and
5646   // potentially expose ILP opportunities.
5647   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
5648                     << "LV: IC is " << IC << '\n'
5649                     << "LV: VF is " << VF.getKnownMinValue() << '\n');
5650   const bool AggressivelyInterleaveReductions =
5651       TTI.enableAggressiveInterleaving(HasReductions);
5652   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
5653     // We assume that the cost overhead is 1 and we use the cost model
5654     // to estimate the cost of the loop and interleave until the cost of the
5655     // loop overhead is about 5% of the cost of the loop.
5656     unsigned SmallIC =
5657         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
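
    // For example, with SmallLoopCost = 20 and LoopCost = 3, this yields
    // SmallIC = min(IC, PowerOf2Floor(20 / 3)) = min(IC, 4).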
5658 
5659     // Interleave until store/load ports (estimated by max interleave count) are
5660     // saturated.
5661     unsigned NumStores = Legal->getNumStores();
5662     unsigned NumLoads = Legal->getNumLoads();
5663     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5664     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5665 
5666     // If we have a scalar reduction (vector reductions are already dealt with
5667     // by this point), we can increase the critical path length if the loop
5668     // we're interleaving is inside another loop. Limit, by default to 2, so the
5669     // critical path only gets increased by one reduction operation.
5670     if (HasReductions && TheLoop->getLoopDepth() > 1) {
5671       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5672       SmallIC = std::min(SmallIC, F);
5673       StoresIC = std::min(StoresIC, F);
5674       LoadsIC = std::min(LoadsIC, F);
5675     }
5676 
5677     if (EnableLoadStoreRuntimeInterleave &&
5678         std::max(StoresIC, LoadsIC) > SmallIC) {
5679       LLVM_DEBUG(
5680           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5681       return std::max(StoresIC, LoadsIC);
5682     }
5683 
5684     // If there are scalar reductions and TTI has enabled aggressive
5685     // interleaving for reductions, we will interleave to expose ILP.
5686     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
5687         AggressivelyInterleaveReductions) {
5688       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressively as the normal
      // IC, to accommodate the rare situation where resources are too limited.
5691       return std::max(IC / 2, SmallIC);
5692     } else {
5693       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5694       return SmallIC;
5695     }
5696   }
5697 
5698   // Interleave if this is a large loop (small loops are already dealt with by
5699   // this point) that could benefit from interleaving.
5700   if (AggressivelyInterleaveReductions) {
5701     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5702     return IC;
5703   }
5704 
5705   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
5706   return 1;
5707 }
5708 
5709 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5710 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
5711   // This function calculates the register usage by measuring the highest number
5712   // of values that are alive at a single location. Obviously, this is a very
  // rough estimation. We scan the loop in topological order and
5714   // assign a number to each instruction. We use RPO to ensure that defs are
5715   // met before their users. We assume that each instruction that has in-loop
5716   // users starts an interval. We record every time that an in-loop value is
5717   // used, so we have a list of the first and last occurrences of each
5718   // instruction. Next, we transpose this data structure into a multi map that
5719   // holds the list of intervals that *end* at a specific location. This multi
5720   // map allows us to perform a linear search. We scan the instructions linearly
5721   // and record each time that a new interval starts, by placing it in a set.
5722   // If we find this value in the multi-map then we remove it from the set.
5723   // The max register usage is the maximum size of the set.
5724   // We also search for instructions that are defined outside the loop, but are
5725   // used inside the loop. We need this number separately from the max-interval
  // usage number because when we unroll, loop-invariant values do not take
  // more registers.
5728   LoopBlocksDFS DFS(TheLoop);
5729   DFS.perform(LI);
5730 
5731   RegisterUsage RU;
5732 
5733   // Each 'key' in the map opens a new interval. The values
5734   // of the map are the index of the 'last seen' usage of the
5735   // instruction that is the key.
5736   using IntervalMap = DenseMap<Instruction *, unsigned>;
5737 
5738   // Maps instruction to its index.
5739   SmallVector<Instruction *, 64> IdxToInstr;
5740   // Marks the end of each interval.
5741   IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
5743   SmallPtrSet<Instruction *, 8> Ends;
5744   // Saves the list of values that are used in the loop but are
5745   // defined outside the loop, such as arguments and constants.
5746   SmallPtrSet<Value *, 8> LoopInvariants;
5747 
5748   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
5749     for (Instruction &I : BB->instructionsWithoutDebug()) {
5750       IdxToInstr.push_back(&I);
5751 
5752       // Save the end location of each USE.
5753       for (Value *U : I.operands()) {
5754         auto *Instr = dyn_cast<Instruction>(U);
5755 
5756         // Ignore non-instruction values such as arguments, constants, etc.
5757         if (!Instr)
5758           continue;
5759 
5760         // If this instruction is outside the loop then record it and continue.
5761         if (!TheLoop->contains(Instr)) {
5762           LoopInvariants.insert(Instr);
5763           continue;
5764         }
5765 
5766         // Overwrite previous end points.
5767         EndPoint[Instr] = IdxToInstr.size();
5768         Ends.insert(Instr);
5769       }
5770     }
5771   }
5772 
5773   // Saves the list of intervals that end with the index in 'key'.
5774   using InstrList = SmallVector<Instruction *, 2>;
5775   DenseMap<unsigned, InstrList> TransposeEnds;
5776 
5777   // Transpose the EndPoints to a list of values that end at each index.
5778   for (auto &Interval : EndPoint)
5779     TransposeEnds[Interval.second].push_back(Interval.first);
5780 
5781   SmallPtrSet<Instruction *, 8> OpenIntervals;
5782 
5783   // Get the size of the widest register.
5784   unsigned MaxSafeDepDist = -1U;
5785   if (Legal->getMaxSafeDepDistBytes() != -1U)
5786     MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
5787   unsigned WidestRegister =
5788       std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
5789   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5790 
5791   SmallVector<RegisterUsage, 8> RUs(VFs.size());
5792   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
5793 
5794   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
5795 
5796   // A lambda that gets the register usage for the given type and VF.
5797   auto GetRegUsage = [&DL, WidestRegister](Type *Ty, ElementCount VF) {
5798     if (Ty->isTokenTy())
5799       return 0U;
5800     unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
5801     assert(!VF.isScalable() && "scalable vectors not yet supported.");
5802     return std::max<unsigned>(1, VF.getKnownMinValue() * TypeSize /
5803                                      WidestRegister);
5804   };
5805 
5806   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
5807     Instruction *I = IdxToInstr[i];
5808 
5809     // Remove all of the instructions that end at this location.
5810     InstrList &List = TransposeEnds[i];
5811     for (Instruction *ToRemove : List)
5812       OpenIntervals.erase(ToRemove);
5813 
5814     // Ignore instructions that are never used within the loop.
5815     if (!Ends.count(I))
5816       continue;
5817 
5818     // Skip ignored values.
5819     if (ValuesToIgnore.count(I))
5820       continue;
5821 
5822     // For each VF find the maximum usage of registers.
5823     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
5824       // Count the number of live intervals.
5825       SmallMapVector<unsigned, unsigned, 4> RegUsage;
5826 
5827       if (VFs[j].isScalar()) {
5828         for (auto Inst : OpenIntervals) {
5829           unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
5830           if (RegUsage.find(ClassID) == RegUsage.end())
5831             RegUsage[ClassID] = 1;
5832           else
5833             RegUsage[ClassID] += 1;
5834         }
5835       } else {
5836         collectUniformsAndScalars(VFs[j]);
5837         for (auto Inst : OpenIntervals) {
5838           // Skip ignored values for VF > 1.
5839           if (VecValuesToIgnore.count(Inst))
5840             continue;
5841           if (isScalarAfterVectorization(Inst, VFs[j])) {
5842             unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
5843             if (RegUsage.find(ClassID) == RegUsage.end())
5844               RegUsage[ClassID] = 1;
5845             else
5846               RegUsage[ClassID] += 1;
5847           } else {
5848             unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
5849             if (RegUsage.find(ClassID) == RegUsage.end())
5850               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
5851             else
5852               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
5853           }
5854         }
5855       }
5856 
5857       for (auto& pair : RegUsage) {
5858         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
5859           MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
5860         else
5861           MaxUsages[j][pair.first] = pair.second;
5862       }
5863     }
5864 
5865     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
5866                       << OpenIntervals.size() << '\n');
5867 
5868     // Add the current instruction to the list of open intervals.
5869     OpenIntervals.insert(I);
5870   }
5871 
5872   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
5873     SmallMapVector<unsigned, unsigned, 4> Invariant;
5874 
5875     for (auto Inst : LoopInvariants) {
5876       unsigned Usage =
5877           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
5878       unsigned ClassID =
5879           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
5880       if (Invariant.find(ClassID) == Invariant.end())
5881         Invariant[ClassID] = Usage;
5882       else
5883         Invariant[ClassID] += Usage;
5884     }
5885 
5886     LLVM_DEBUG({
5887       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
5888       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
5889              << " item\n";
5890       for (const auto &pair : MaxUsages[i]) {
5891         dbgs() << "LV(REG): RegisterClass: "
5892                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
5893                << " registers\n";
5894       }
5895       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
5896              << " item\n";
5897       for (const auto &pair : Invariant) {
5898         dbgs() << "LV(REG): RegisterClass: "
5899                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
5900                << " registers\n";
5901       }
5902     });
5903 
5904     RU.LoopInvariantRegs = Invariant;
5905     RU.MaxLocalUsers = MaxUsages[i];
5906     RUs[i] = RU;
5907   }
5908 
5909   return RUs;
5910 }
5911 
bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
5913   // TODO: Cost model for emulated masked load/store is completely
5914   // broken. This hack guides the cost model to use an artificially
5915   // high enough value to practically disable vectorization with such
5916   // operations, except where previously deployed legality hack allowed
5917   // using very low cost values. This is to avoid regressions coming simply
5918   // from moving "masked load/store" check from legality to cost model.
5919   // Masked Load/Gather emulation was previously never allowed.
5920   // Limited number of Masked Store/Scatter emulation was allowed.
5921   assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
5922   return isa<LoadInst>(I) ||
5923          (isa<StoreInst>(I) &&
5924           NumPredStores > NumberOfStoresToPredicate);
5925 }
5926 
5927 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
5928   // If we aren't vectorizing the loop, or if we've already collected the
5929   // instructions to scalarize, there's nothing to do. Collection may already
5930   // have occurred if we have a user-selected VF and are now computing the
5931   // expected cost for interleaving.
5932   if (VF.isScalar() || VF.isZero() ||
5933       InstsToScalarize.find(VF) != InstsToScalarize.end())
5934     return;
5935 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
5937   // not profitable to scalarize any instructions, the presence of VF in the
5938   // map will indicate that we've analyzed it already.
5939   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
5940 
5941   // Find all the instructions that are scalar with predication in the loop and
5942   // determine if it would be better to not if-convert the blocks they are in.
5943   // If so, we also record the instructions to scalarize.
5944   for (BasicBlock *BB : TheLoop->blocks()) {
5945     if (!blockNeedsPredication(BB))
5946       continue;
5947     for (Instruction &I : *BB)
5948       if (isScalarWithPredication(&I)) {
5949         ScalarCostsTy ScalarCosts;
5950         // Do not apply discount logic if hacked cost is needed
5951         // for emulated masked memrefs.
5952         if (!useEmulatedMaskMemRefHack(&I) &&
5953             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
5954           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
5955         // Remember that BB will remain after vectorization.
5956         PredicatedBBsAfterVectorization.insert(BB);
5957       }
5958   }
5959 }
5960 
5961 int LoopVectorizationCostModel::computePredInstDiscount(
5962     Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
5963     ElementCount VF) {
5964   assert(!isUniformAfterVectorization(PredInst, VF) &&
5965          "Instruction marked uniform-after-vectorization will be predicated");
5966 
5967   // Initialize the discount to zero, meaning that the scalar version and the
5968   // vector version cost the same.
5969   int Discount = 0;
5970 
5971   // Holds instructions to analyze. The instructions we visit are mapped in
5972   // ScalarCosts. Those instructions are the ones that would be scalarized if
5973   // we find that the scalar version costs less.
5974   SmallVector<Instruction *, 8> Worklist;
5975 
5976   // Returns true if the given instruction can be scalarized.
5977   auto canBeScalarized = [&](Instruction *I) -> bool {
5978     // We only attempt to scalarize instructions forming a single-use chain
5979     // from the original predicated block that would otherwise be vectorized.
5980     // Although not strictly necessary, we give up on instructions we know will
5981     // already be scalar to avoid traversing chains that are unlikely to be
5982     // beneficial.
5983     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
5984         isScalarAfterVectorization(I, VF))
5985       return false;
5986 
5987     // If the instruction is scalar with predication, it will be analyzed
5988     // separately. We ignore it within the context of PredInst.
5989     if (isScalarWithPredication(I))
5990       return false;
5991 
5992     // If any of the instruction's operands are uniform after vectorization,
5993     // the instruction cannot be scalarized. This prevents, for example, a
5994     // masked load from being scalarized.
5995     //
5996     // We assume we will only emit a value for lane zero of an instruction
5997     // marked uniform after vectorization, rather than VF identical values.
5998     // Thus, if we scalarize an instruction that uses a uniform, we would
5999     // create uses of values corresponding to the lanes we aren't emitting code
6000     // for. This behavior can be changed by allowing getScalarValue to clone
6001     // the lane zero values for uniforms rather than asserting.
6002     for (Use &U : I->operands())
6003       if (auto *J = dyn_cast<Instruction>(U.get()))
6004         if (isUniformAfterVectorization(J, VF))
6005           return false;
6006 
6007     // Otherwise, we can scalarize the instruction.
6008     return true;
6009   };
6010 
6011   // Compute the expected cost discount from scalarizing the entire expression
6012   // feeding the predicated instruction. We currently only consider expressions
6013   // that are single-use instruction chains.
6014   Worklist.push_back(PredInst);
6015   while (!Worklist.empty()) {
6016     Instruction *I = Worklist.pop_back_val();
6017 
6018     // If we've already analyzed the instruction, there's nothing to do.
6019     if (ScalarCosts.find(I) != ScalarCosts.end())
6020       continue;
6021 
6022     // Compute the cost of the vector instruction. Note that this cost already
6023     // includes the scalarization overhead of the predicated instruction.
6024     unsigned VectorCost = getInstructionCost(I, VF).first;
6025 
6026     // Compute the cost of the scalarized instruction. This cost is the cost of
6027     // the instruction as if it wasn't if-converted and instead remained in the
6028     // predicated block. We will scale this cost by block probability after
6029     // computing the scalarization overhead.
6030     assert(!VF.isScalable() && "scalable vectors not yet supported.");
6031     unsigned ScalarCost =
6032         VF.getKnownMinValue() *
6033         getInstructionCost(I, ElementCount::getFixed(1)).first;
6034 
6035     // Compute the scalarization overhead of needed insertelement instructions
6036     // and phi nodes.
6037     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6038       ScalarCost += TTI.getScalarizationOverhead(
6039           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6040           APInt::getAllOnesValue(VF.getKnownMinValue()), true, false);
6041       assert(!VF.isScalable() && "scalable vectors not yet supported.");
6042       ScalarCost +=
6043           VF.getKnownMinValue() *
6044           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6045     }
6046 
6047     // Compute the scalarization overhead of needed extractelement
6048     // instructions. For each of the instruction's operands, if the operand can
6049     // be scalarized, add it to the worklist; otherwise, account for the
6050     // overhead.
6051     for (Use &U : I->operands())
6052       if (auto *J = dyn_cast<Instruction>(U.get())) {
6053         assert(VectorType::isValidElementType(J->getType()) &&
6054                "Instruction has non-scalar type");
6055         if (canBeScalarized(J))
6056           Worklist.push_back(J);
6057         else if (needsExtract(J, VF)) {
6058           assert(!VF.isScalable() && "scalable vectors not yet supported.");
6059           ScalarCost += TTI.getScalarizationOverhead(
6060               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6061               APInt::getAllOnesValue(VF.getKnownMinValue()), false, true);
6062         }
6063       }
6064 
6065     // Scale the total scalar cost by block probability.
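    // For example, getReciprocalPredBlockProb() returns 2 (predicated blocks
    // are assumed to execute once every two iterations of the header), so a
    // raw scalar cost of 8 is scaled down to 4 here.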
6066     ScalarCost /= getReciprocalPredBlockProb();
6067 
    // Compute the discount. A non-negative discount means the vector version
    // of the instruction costs at least as much as the scalar version, so
    // scalarizing would be beneficial (or at worst cost-neutral).
6070     Discount += VectorCost - ScalarCost;
6071     ScalarCosts[I] = ScalarCost;
6072   }
6073 
6074   return Discount;
6075 }
6076 
6077 LoopVectorizationCostModel::VectorizationCostTy
6078 LoopVectorizationCostModel::expectedCost(ElementCount VF) {
6079   assert(!VF.isScalable() && "scalable vectors not yet supported.");
6080   VectorizationCostTy Cost;
6081 
6082   // For each block.
6083   for (BasicBlock *BB : TheLoop->blocks()) {
6084     VectorizationCostTy BlockCost;
6085 
6086     // For each instruction in the old loop.
6087     for (Instruction &I : BB->instructionsWithoutDebug()) {
6088       // Skip ignored values.
6089       if (ValuesToIgnore.count(&I) ||
6090           (VF.isVector() && VecValuesToIgnore.count(&I)))
6091         continue;
6092 
6093       VectorizationCostTy C = getInstructionCost(&I, VF);
6094 
6095       // Check if we should override the cost.
6096       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
6097         C.first = ForceTargetInstructionCost;
6098 
6099       BlockCost.first += C.first;
6100       BlockCost.second |= C.second;
6101       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6102                         << " for VF " << VF << " For instruction: " << I
6103                         << '\n');
6104     }
6105 
6106     // If we are vectorizing a predicated block, it will have been
6107     // if-converted. This means that the block's instructions (aside from
6108     // stores and instructions that may divide by zero) will now be
6109     // unconditionally executed. For the scalar case, we may not always execute
6110     // the predicated block. Thus, scale the block's cost by the probability of
6111     // executing it.
6112     if (VF.isScalar() && blockNeedsPredication(BB))
6113       BlockCost.first /= getReciprocalPredBlockProb();
6114 
6115     Cost.first += BlockCost.first;
6116     Cost.second |= BlockCost.second;
6117   }
6118 
6119   return Cost;
6120 }
6121 
/// Gets the address access SCEV after verifying that the access pattern is
/// loop-invariant except for the induction-variable dependence.
///
/// This SCEV can be sent to the Target in order to estimate the address
/// calculation cost.
static const SCEV *getAddressAccessSCEV(Value *Ptr,
                                        LoopVectorizationLegality *Legal,
                                        PredicatedScalarEvolution &PSE,
                                        const Loop *TheLoop) {
6133   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6134   if (!Gep)
6135     return nullptr;
6136 
6137   // We are looking for a gep with all loop invariant indices except for one
6138   // which should be an induction variable.
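  // For example (with hypothetical names):
  //   %gep = getelementptr inbounds [256 x i32], [256 x i32]* %A, i64 %inv, i64 %iv
  // qualifies when %inv is loop-invariant and %iv is an induction variable.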
6139   auto SE = PSE.getSE();
6140   unsigned NumOperands = Gep->getNumOperands();
6141   for (unsigned i = 1; i < NumOperands; ++i) {
6142     Value *Opd = Gep->getOperand(i);
6143     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6144         !Legal->isInductionVariable(Opd))
6145       return nullptr;
6146   }
6147 
  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
6149   return PSE.getSCEV(Ptr);
6150 }
6151 
6152 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6153   return Legal->hasStride(I->getOperand(0)) ||
6154          Legal->hasStride(I->getOperand(1));
6155 }
6156 
6157 unsigned
6158 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6159                                                         ElementCount VF) {
6160   assert(VF.isVector() &&
6161          "Scalarization cost of instruction implies vectorization.");
6162   assert(!VF.isScalable() && "scalable vectors not yet supported.");
6163   Type *ValTy = getMemInstValueType(I);
6164   auto SE = PSE.getSE();
6165 
6166   unsigned AS = getLoadStoreAddressSpace(I);
6167   Value *Ptr = getLoadStorePointerOperand(I);
6168   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6169 
  // Figure out whether the access is strided and get the stride value, if it's
  // known at compile time.
6172   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6173 
6174   // Get the cost of the scalar memory instruction and address computation.
6175   unsigned Cost =
6176       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6177 
6178   // Don't pass *I here, since it is scalar but will actually be part of a
6179   // vectorized loop where the user of it is a vectorized instruction.
6180   const Align Alignment = getLoadStoreAlignment(I);
6181   Cost += VF.getKnownMinValue() *
6182           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6183                               AS, TTI::TCK_RecipThroughput);
6184 
6185   // Get the overhead of the extractelement and insertelement instructions
6186   // we might create due to scalarization.
6187   Cost += getScalarizationOverhead(I, VF);
6188 
6189   // If we have a predicated store, it may not be executed for each vector
6190   // lane. Scale the cost by the probability of executing the predicated
6191   // block.
6192   if (isPredicatedInst(I)) {
6193     Cost /= getReciprocalPredBlockProb();
6194 
6195     if (useEmulatedMaskMemRefHack(I))
      // Artificially set the cost to a value high enough to practically
      // disable vectorization with such operations.
6198       Cost = 3000000;
6199   }
6200 
6201   return Cost;
6202 }
6203 
6204 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6205                                                              ElementCount VF) {
6206   Type *ValTy = getMemInstValueType(I);
6207   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6208   Value *Ptr = getLoadStorePointerOperand(I);
6209   unsigned AS = getLoadStoreAddressSpace(I);
6210   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
6211   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6212 
6213   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6214          "Stride should be 1 or -1 for consecutive memory access");
6215   const Align Alignment = getLoadStoreAlignment(I);
6216   unsigned Cost = 0;
6217   if (Legal->isMaskRequired(I))
6218     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6219                                       CostKind);
6220   else
6221     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6222                                 CostKind, I);
6223 
6224   bool Reverse = ConsecutiveStride < 0;
6225   if (Reverse)
6226     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
6227   return Cost;
6228 }
6229 
6230 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6231                                                          ElementCount VF) {
6232   Type *ValTy = getMemInstValueType(I);
6233   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6234   const Align Alignment = getLoadStoreAlignment(I);
6235   unsigned AS = getLoadStoreAddressSpace(I);
6236   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6237   if (isa<LoadInst>(I)) {
6238     return TTI.getAddressComputationCost(ValTy) +
6239            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6240                                CostKind) +
6241            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6242   }
6243   StoreInst *SI = cast<StoreInst>(I);
6244 
6245   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
6246   return TTI.getAddressComputationCost(ValTy) +
6247          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6248                              CostKind) +
6249          (isLoopInvariantStoreValue
6250               ? 0
6251               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6252                                        VF.getKnownMinValue() - 1));
6253 }
6254 
6255 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6256                                                           ElementCount VF) {
6257   Type *ValTy = getMemInstValueType(I);
6258   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6259   const Align Alignment = getLoadStoreAlignment(I);
6260   const Value *Ptr = getLoadStorePointerOperand(I);
6261 
6262   return TTI.getAddressComputationCost(VectorTy) +
6263          TTI.getGatherScatterOpCost(
6264              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6265              TargetTransformInfo::TCK_RecipThroughput, I);
6266 }
6267 
6268 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6269                                                             ElementCount VF) {
6270   Type *ValTy = getMemInstValueType(I);
6271   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6272   unsigned AS = getLoadStoreAddressSpace(I);
6273 
6274   auto Group = getInterleavedAccessGroup(I);
6275   assert(Group && "Fail to get an interleaved access group.");
6276 
6277   unsigned InterleaveFactor = Group->getFactor();
6278   assert(!VF.isScalable() && "scalable vectors not yet supported.");
6279   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6280 
6281   // Holds the indices of existing members in an interleaved load group.
6282   // An interleaved store group doesn't need this as it doesn't allow gaps.
6283   SmallVector<unsigned, 4> Indices;
6284   if (isa<LoadInst>(I)) {
6285     for (unsigned i = 0; i < InterleaveFactor; i++)
6286       if (Group->getMember(i))
6287         Indices.push_back(i);
6288   }
6289 
6290   // Calculate the cost of the whole interleaved group.
6291   bool UseMaskForGaps =
6292       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
6293   unsigned Cost = TTI.getInterleavedMemoryOpCost(
6294       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6295       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6296 
6297   if (Group->isReverse()) {
6298     // TODO: Add support for reversed masked interleaved access.
6299     assert(!Legal->isMaskRequired(I) &&
6300            "Reverse masked interleaved access not supported.");
6301     Cost += Group->getNumMembers() *
6302             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
6303   }
6304   return Cost;
6305 }
6306 
6307 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
6308                                                               ElementCount VF) {
6309   // Calculate scalar cost only. Vectorization cost should be ready at this
6310   // moment.
6311   if (VF.isScalar()) {
6312     Type *ValTy = getMemInstValueType(I);
6313     const Align Alignment = getLoadStoreAlignment(I);
6314     unsigned AS = getLoadStoreAddressSpace(I);
6315 
6316     return TTI.getAddressComputationCost(ValTy) +
6317            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
6318                                TTI::TCK_RecipThroughput, I);
6319   }
6320   return getWideningCost(I, VF);
6321 }
6322 
6323 LoopVectorizationCostModel::VectorizationCostTy
6324 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6325                                                ElementCount VF) {
6326   assert(!VF.isScalable() &&
6327          "the cost model is not yet implemented for scalable vectorization");
6328   // If we know that this instruction will remain uniform, check the cost of
6329   // the scalar version.
6330   if (isUniformAfterVectorization(I, VF))
6331     VF = ElementCount::getFixed(1);
6332 
6333   if (VF.isVector() && isProfitableToScalarize(I, VF))
6334     return VectorizationCostTy(InstsToScalarize[VF][I], false);
6335 
6336   // Forced scalars do not have any scalarization overhead.
6337   auto ForcedScalar = ForcedScalars.find(VF);
6338   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
6339     auto InstSet = ForcedScalar->second;
6340     if (InstSet.count(I))
6341       return VectorizationCostTy(
6342           (getInstructionCost(I, ElementCount::getFixed(1)).first *
6343            VF.getKnownMinValue()),
6344           false);
6345   }
6346 
6347   Type *VectorTy;
6348   unsigned C = getInstructionCost(I, VF, VectorTy);
6349 
6350   bool TypeNotScalarized =
6351       VF.isVector() && VectorTy->isVectorTy() &&
6352       TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue();
6353   return VectorizationCostTy(C, TypeNotScalarized);
6354 }
6355 
6356 unsigned LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
6357                                                               ElementCount VF) {
6359   assert(!VF.isScalable() &&
6360          "cannot compute scalarization overhead for scalable vectorization");
6361   if (VF.isScalar())
6362     return 0;
6363 
6364   unsigned Cost = 0;
6365   Type *RetTy = ToVectorTy(I->getType(), VF);
6366   if (!RetTy->isVoidTy() &&
6367       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
6368     Cost += TTI.getScalarizationOverhead(
6369         cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()),
6370         true, false);
6371 
6372   // Some targets keep addresses scalar.
6373   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
6374     return Cost;
6375 
6376   // Some targets support efficient element stores.
6377   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
6378     return Cost;
6379 
6380   // Collect operands to consider.
6381   CallInst *CI = dyn_cast<CallInst>(I);
6382   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
6383 
6384   // Skip operands that do not require extraction/scalarization and do not incur
6385   // any overhead.
6386   return Cost + TTI.getOperandsScalarizationOverhead(
6387                     filterExtractingOperands(Ops, VF), VF.getKnownMinValue());
6388 }
6389 
6390 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
6391   assert(!VF.isScalable() && "scalable vectors not yet supported.");
6392   if (VF.isScalar())
6393     return;
6394   NumPredStores = 0;
6395   for (BasicBlock *BB : TheLoop->blocks()) {
6396     // For each instruction in the old loop.
6397     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
6399       if (!Ptr)
6400         continue;
6401 
6402       // TODO: We should generate better code and update the cost model for
6403       // predicated uniform stores. Today they are treated as any other
6404       // predicated store (see added test cases in
6405       // invariant-store-vectorization.ll).
6406       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
6407         NumPredStores++;
6408 
6409       if (Legal->isUniform(Ptr) &&
6410           // Conditional loads and stores should be scalarized and predicated.
6411           // isScalarWithPredication cannot be used here since masked
6412           // gather/scatters are not considered scalar with predication.
6413           !Legal->blockNeedsPredication(I.getParent())) {
6414         // TODO: Avoid replicating loads and stores instead of
6415         // relying on instcombine to remove them.
6416         // Load: Scalar load + broadcast
6417         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
6418         unsigned Cost = getUniformMemOpCost(&I, VF);
6419         setWideningDecision(&I, VF, CM_Scalarize, Cost);
6420         continue;
6421       }
6422 
6423       // We assume that widening is the best solution when possible.
6424       if (memoryInstructionCanBeWidened(&I, VF)) {
6425         unsigned Cost = getConsecutiveMemOpCost(&I, VF);
6426         int ConsecutiveStride =
6427                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
6428         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6429                "Expected consecutive stride.");
6430         InstWidening Decision =
6431             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
6432         setWideningDecision(&I, VF, Decision, Cost);
6433         continue;
6434       }
6435 
6436       // Choose between Interleaving, Gather/Scatter or Scalarization.
6437       unsigned InterleaveCost = std::numeric_limits<unsigned>::max();
6438       unsigned NumAccesses = 1;
6439       if (isAccessInterleaved(&I)) {
6440         auto Group = getInterleavedAccessGroup(&I);
6441         assert(Group && "Fail to get an interleaved access group.");
6442 
6443         // Make one decision for the whole group.
6444         if (getWideningDecision(&I, VF) != CM_Unknown)
6445           continue;
6446 
6447         NumAccesses = Group->getNumMembers();
6448         if (interleavedAccessCanBeWidened(&I, VF))
6449           InterleaveCost = getInterleaveGroupCost(&I, VF);
6450       }
6451 
6452       unsigned GatherScatterCost =
6453           isLegalGatherOrScatter(&I)
6454               ? getGatherScatterCost(&I, VF) * NumAccesses
6455               : std::numeric_limits<unsigned>::max();
6456 
6457       unsigned ScalarizationCost =
6458           getMemInstScalarizationCost(&I, VF) * NumAccesses;
6459 
6460       // Choose better solution for the current VF,
6461       // write down this decision and use it during vectorization.
6462       unsigned Cost;
6463       InstWidening Decision;
6464       if (InterleaveCost <= GatherScatterCost &&
6465           InterleaveCost < ScalarizationCost) {
6466         Decision = CM_Interleave;
6467         Cost = InterleaveCost;
6468       } else if (GatherScatterCost < ScalarizationCost) {
6469         Decision = CM_GatherScatter;
6470         Cost = GatherScatterCost;
6471       } else {
6472         Decision = CM_Scalarize;
6473         Cost = ScalarizationCost;
6474       }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The group as a whole receives the cost,
      // but the cost will actually be assigned to one instruction.
6478       if (auto Group = getInterleavedAccessGroup(&I))
6479         setWideningDecision(Group, VF, Decision, Cost);
6480       else
6481         setWideningDecision(&I, VF, Decision, Cost);
6482     }
6483   }
6484 
6485   // Make sure that any load of address and any other address computation
6486   // remains scalar unless there is gather/scatter support. This avoids
6487   // inevitable extracts into address registers, and also has the benefit of
6488   // activating LSR more, since that pass can't optimize vectorized
6489   // addresses.
6490   if (TTI.prefersVectorizedAddressing())
6491     return;
6492 
6493   // Start with all scalar pointer uses.
6494   SmallPtrSet<Instruction *, 8> AddrDefs;
6495   for (BasicBlock *BB : TheLoop->blocks())
6496     for (Instruction &I : *BB) {
6497       Instruction *PtrDef =
6498         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
6499       if (PtrDef && TheLoop->contains(PtrDef) &&
6500           getWideningDecision(&I, VF) != CM_GatherScatter)
6501         AddrDefs.insert(PtrDef);
6502     }
6503 
6504   // Add all instructions used to generate the addresses.
6505   SmallVector<Instruction *, 4> Worklist;
6506   for (auto *I : AddrDefs)
6507     Worklist.push_back(I);
6508   while (!Worklist.empty()) {
6509     Instruction *I = Worklist.pop_back_val();
6510     for (auto &Op : I->operands())
6511       if (auto *InstOp = dyn_cast<Instruction>(Op))
6512         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
6513             AddrDefs.insert(InstOp).second)
6514           Worklist.push_back(InstOp);
6515   }
6516 
6517   for (auto *I : AddrDefs) {
6518     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // cost functions, but since this involves the task of finding out if the
      // loaded register is involved in an address computation, it is instead
      // changed here when we know this is the case.
6523       InstWidening Decision = getWideningDecision(I, VF);
6524       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
6525         // Scalarize a widened load of address.
6526         setWideningDecision(
6527             I, VF, CM_Scalarize,
6528             (VF.getKnownMinValue() *
6529              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
6530       else if (auto Group = getInterleavedAccessGroup(I)) {
6531         // Scalarize an interleave group of address loads.
6532         for (unsigned I = 0; I < Group->getFactor(); ++I) {
6533           if (Instruction *Member = Group->getMember(I))
6534             setWideningDecision(
6535                 Member, VF, CM_Scalarize,
6536                 (VF.getKnownMinValue() *
6537                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
6538         }
6539       }
6540     } else
6541       // Make sure I gets scalarized and a cost estimate without
6542       // scalarization overhead.
6543       ForcedScalars[VF].insert(I);
6544   }
6545 }
6546 
6547 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6548                                                         ElementCount VF,
6549                                                         Type *&VectorTy) {
6550   Type *RetTy = I->getType();
6551   if (canTruncateToMinimalBitwidth(I, VF))
6552     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6553   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
6554   auto SE = PSE.getSE();
6555   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6556 
6557   // TODO: We need to estimate the cost of intrinsic calls.
6558   switch (I->getOpcode()) {
6559   case Instruction::GetElementPtr:
6560     // We mark this instruction as zero-cost because the cost of GEPs in
6561     // vectorized code depends on whether the corresponding memory instruction
6562     // is scalarized or not. Therefore, we handle GEPs with the memory
6563     // instruction cost.
6564     return 0;
6565   case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
6569     bool ScalarPredicatedBB = false;
6570     BranchInst *BI = cast<BranchInst>(I);
6571     if (VF.isVector() && BI->isConditional() &&
6572         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
6573          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
6574       ScalarPredicatedBB = true;
6575 
6576     if (ScalarPredicatedBB) {
6577       // Return cost for branches around scalarized and predicated blocks.
6578       assert(!VF.isScalable() && "scalable vectors not yet supported.");
6579       auto *Vec_i1Ty =
6580           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
6581       return (TTI.getScalarizationOverhead(
6582                   Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()),
6583                   false, true) +
6584               (TTI.getCFInstrCost(Instruction::Br, CostKind) *
6585                VF.getKnownMinValue()));
6586     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
6587       // The back-edge branch will remain, as will all scalar branches.
6588       return TTI.getCFInstrCost(Instruction::Br, CostKind);
6589     else
6590       // This branch will be eliminated by if-conversion.
6591       return 0;
6592     // Note: We currently assume zero cost for an unconditional branch inside
6593     // a predicated block since it will become a fall-through, although we
6594     // may decide in the future to call TTI for all branches.
6595   }
6596   case Instruction::PHI: {
6597     auto *Phi = cast<PHINode>(I);
6598 
6599     // First-order recurrences are replaced by vector shuffles inside the loop.
6600     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
6601     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
6602       return TTI.getShuffleCost(
6603           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
6604           VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
6605 
6606     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
6607     // converted into select instructions. We require N - 1 selects per phi
6608     // node, where N is the number of incoming values.
6609     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
6610       return (Phi->getNumIncomingValues() - 1) *
6611              TTI.getCmpSelInstrCost(
6612                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
6613                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
6614                  CostKind);
6615 
6616     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
6617   }
6618   case Instruction::UDiv:
6619   case Instruction::SDiv:
6620   case Instruction::URem:
6621   case Instruction::SRem:
6622     // If we have a predicated instruction, it may not be executed for each
6623     // vector lane. Get the scalarization cost and scale this amount by the
6624     // probability of executing the predicated block. If the instruction is not
6625     // predicated, we fall through to the next case.
6626     if (VF.isVector() && isScalarWithPredication(I)) {
6627       unsigned Cost = 0;
6628 
6629       // These instructions have a non-void type, so account for the phi nodes
6630       // that we will create. This cost is likely to be zero. The phi node
6631       // cost, if any, should be scaled by the block probability because it
6632       // models a copy at the end of each predicated block.
6633       Cost += VF.getKnownMinValue() *
6634               TTI.getCFInstrCost(Instruction::PHI, CostKind);
6635 
6636       // The cost of the non-predicated instruction.
6637       Cost += VF.getKnownMinValue() *
6638               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
6639 
6640       // The cost of insertelement and extractelement instructions needed for
6641       // scalarization.
6642       Cost += getScalarizationOverhead(I, VF);
6643 
6644       // Scale the cost by the probability of executing the predicated blocks.
6645       // This assumes the predicated block for each vector lane is equally
6646       // likely.
6647       return Cost / getReciprocalPredBlockProb();
6648     }
6649     LLVM_FALLTHROUGH;
6650   case Instruction::Add:
6651   case Instruction::FAdd:
6652   case Instruction::Sub:
6653   case Instruction::FSub:
6654   case Instruction::Mul:
6655   case Instruction::FMul:
6656   case Instruction::FDiv:
6657   case Instruction::FRem:
6658   case Instruction::Shl:
6659   case Instruction::LShr:
6660   case Instruction::AShr:
6661   case Instruction::And:
6662   case Instruction::Or:
6663   case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go away.
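    // For example, if the loop has been versioned with the symbolic stride
    // assumed equal to 1, a multiply such as "%mul = mul i64 %iv, %stride"
    // (hypothetical names) folds away, so it is given zero cost here.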
6665     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
6666       return 0;
    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
6669     Value *Op2 = I->getOperand(1);
6670     TargetTransformInfo::OperandValueProperties Op2VP;
6671     TargetTransformInfo::OperandValueKind Op2VK =
6672         TTI.getOperandInfo(Op2, Op2VP);
6673     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
6674       Op2VK = TargetTransformInfo::OK_UniformValue;
6675 
6676     SmallVector<const Value *, 4> Operands(I->operand_values());
6677     unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
6678     return N * TTI.getArithmeticInstrCost(
6679                    I->getOpcode(), VectorTy, CostKind,
6680                    TargetTransformInfo::OK_AnyValue,
6681                    Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
6682   }
6683   case Instruction::FNeg: {
6684     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
6685     unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
6686     return N * TTI.getArithmeticInstrCost(
6687                    I->getOpcode(), VectorTy, CostKind,
6688                    TargetTransformInfo::OK_AnyValue,
6689                    TargetTransformInfo::OK_AnyValue,
6690                    TargetTransformInfo::OP_None, TargetTransformInfo::OP_None,
6691                    I->getOperand(0), I);
6692   }
6693   case Instruction::Select: {
6694     SelectInst *SI = cast<SelectInst>(I);
6695     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6696     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6697     Type *CondTy = SI->getCondition()->getType();
6698     if (!ScalarCond) {
6699       assert(!VF.isScalable() && "VF is assumed to be non scalable.");
6700       CondTy = VectorType::get(CondTy, VF);
6701     }
6702     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
6703                                   CostKind, I);
6704   }
6705   case Instruction::ICmp:
6706   case Instruction::FCmp: {
6707     Type *ValTy = I->getOperand(0)->getType();
6708     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6709     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6710       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
6711     VectorTy = ToVectorTy(ValTy, VF);
6712     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, CostKind,
6713                                   I);
6714   }
6715   case Instruction::Store:
6716   case Instruction::Load: {
6717     ElementCount Width = VF;
6718     if (Width.isVector()) {
6719       InstWidening Decision = getWideningDecision(I, Width);
6720       assert(Decision != CM_Unknown &&
6721              "CM decision should be taken at this point");
6722       if (Decision == CM_Scalarize)
6723         Width = ElementCount::getFixed(1);
6724     }
6725     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
6726     return getMemoryInstructionCost(I, VF);
6727   }
6728   case Instruction::ZExt:
6729   case Instruction::SExt:
6730   case Instruction::FPToUI:
6731   case Instruction::FPToSI:
6732   case Instruction::FPExt:
6733   case Instruction::PtrToInt:
6734   case Instruction::IntToPtr:
6735   case Instruction::SIToFP:
6736   case Instruction::UIToFP:
6737   case Instruction::Trunc:
6738   case Instruction::FPTrunc:
6739   case Instruction::BitCast: {
6740     // Computes the CastContextHint from a Load/Store instruction.
6741     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
6742       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
6743              "Expected a load or a store!");
6744 
6745       if (VF.isScalar() || !TheLoop->contains(I))
6746         return TTI::CastContextHint::Normal;
6747 
6748       switch (getWideningDecision(I, VF)) {
6749       case LoopVectorizationCostModel::CM_GatherScatter:
6750         return TTI::CastContextHint::GatherScatter;
6751       case LoopVectorizationCostModel::CM_Interleave:
6752         return TTI::CastContextHint::Interleave;
6753       case LoopVectorizationCostModel::CM_Scalarize:
6754       case LoopVectorizationCostModel::CM_Widen:
6755         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
6756                                         : TTI::CastContextHint::Normal;
6757       case LoopVectorizationCostModel::CM_Widen_Reverse:
6758         return TTI::CastContextHint::Reversed;
6759       case LoopVectorizationCostModel::CM_Unknown:
6760         llvm_unreachable("Instr did not go through cost modelling?");
6761       }
6762 
6763       llvm_unreachable("Unhandled case!");
6764     };
6765 
6766     unsigned Opcode = I->getOpcode();
6767     TTI::CastContextHint CCH = TTI::CastContextHint::None;
6768     // For Trunc, the context is the only user, which must be a StoreInst.
6769     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
6770       if (I->hasOneUse())
6771         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
6772           CCH = ComputeCCH(Store);
6773     }
6774     // For Z/Sext, the context is the operand, which must be a LoadInst.
6775     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
6776              Opcode == Instruction::FPExt) {
6777       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
6778         CCH = ComputeCCH(Load);
6779     }
6780 
6781     // We optimize the truncation of induction variables having constant
6782     // integer steps. The cost of these truncations is the same as the scalar
6783     // operation.
6784     if (isOptimizableIVTruncate(I, VF)) {
6785       auto *Trunc = cast<TruncInst>(I);
6786       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6787                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
6788     }
6789 
6790     Type *SrcScalarTy = I->getOperand(0)->getType();
6791     Type *SrcVecTy =
6792         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6793     if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or turn it
      // into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
6797       //
6798       // Calculate the modified src and dest types.
6799       Type *MinVecTy = VectorTy;
6800       if (Opcode == Instruction::Trunc) {
6801         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
6802         VectorTy =
6803             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6804       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
6805         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
6806         VectorTy =
6807             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6808       }
6809     }
6810 
6811     assert(!VF.isScalable() && "VF is assumed to be non scalable");
6812     unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1;
6813     return N *
6814            TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
6815   }
6816   case Instruction::Call: {
6817     bool NeedToScalarize;
6818     CallInst *CI = cast<CallInst>(I);
6819     unsigned CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
6820     if (getVectorIntrinsicIDForCall(CI, TLI))
6821       return std::min(CallCost, getVectorIntrinsicCost(CI, VF));
6822     return CallCost;
6823   }
6824   default:
6825     // The cost of executing VF copies of the scalar instruction. This opcode
6826     // is unknown. Assume that it is the same as 'mul'.
6827     return VF.getKnownMinValue() * TTI.getArithmeticInstrCost(
6828                                        Instruction::Mul, VectorTy, CostKind) +
6829            getScalarizationOverhead(I, VF);
6830   } // end of switch.
6831 }
6832 
6833 char LoopVectorize::ID = 0;
6834 
6835 static const char lv_name[] = "Loop Vectorization";
6836 
6837 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
6838 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
6839 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
6840 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
6841 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
6842 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
6843 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
6844 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
6845 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
6846 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
6847 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
6848 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
6849 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
6850 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
6851 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
6852 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
6853 
6854 namespace llvm {
6855 
6856 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
6857 
6858 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
6859                               bool VectorizeOnlyWhenForced) {
6860   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
6861 }
6862 
6863 } // end namespace llvm
6864 
6865 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
6866   // Check if the pointer operand of a load or store instruction is
6867   // consecutive.
6868   if (auto *Ptr = getLoadStorePointerOperand(Inst))
6869     return Legal->isConsecutivePtr(Ptr);
6870   return false;
6871 }
6872 
6873 void LoopVectorizationCostModel::collectValuesToIgnore() {
6874   // Ignore ephemeral values.
6875   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
6876 
6877   // Ignore type-promoting instructions we identified during reduction
6878   // detection.
6879   for (auto &Reduction : Legal->getReductionVars()) {
6880     RecurrenceDescriptor &RedDes = Reduction.second;
6881     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6882     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6883   }
6884   // Ignore type-casting instructions we identified during induction
6885   // detection.
6886   for (auto &Induction : Legal->getInductionVars()) {
6887     InductionDescriptor &IndDes = Induction.second;
6888     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6889     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6890   }
6891 }
6892 
6893 void LoopVectorizationCostModel::collectInLoopReductions() {
6894   for (auto &Reduction : Legal->getReductionVars()) {
6895     PHINode *Phi = Reduction.first;
6896     RecurrenceDescriptor &RdxDesc = Reduction.second;
6897 
6898     // We don't collect reductions that are type promoted (yet).
6899     if (RdxDesc.getRecurrenceType() != Phi->getType())
6900       continue;
6901 
6902     // If the target would prefer this reduction to happen "in-loop", then we
6903     // want to record it as such.
6904     unsigned Opcode = RdxDesc.getRecurrenceBinOp(RdxDesc.getRecurrenceKind());
6905     if (!PreferInLoopReductions &&
6906         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
6907                                    TargetTransformInfo::ReductionFlags()))
6908       continue;
6909 
6910     // Check that we can correctly put the reductions into the loop, by
6911     // finding the chain of operations that leads from the phi to the loop
6912     // exit value.
6913     SmallVector<Instruction *, 4> ReductionOperations =
6914         RdxDesc.getReductionOpChain(Phi, TheLoop);
6915     bool InLoop = !ReductionOperations.empty();
6916     if (InLoop)
6917       InLoopReductionChains[Phi] = ReductionOperations;
6918     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
6919                       << " reduction for phi: " << *Phi << "\n");
6920   }
6921 }
6922 
// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
// doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
6928 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
6929                                  LoopVectorizationCostModel &CM) {
6930   unsigned WidestType;
6931   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
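  // E.g., with 512-bit vector registers and a widest scalar type of 32 bits,
  // this returns a VF of 512 / 32 = 16.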
6932   return WidestVectorRegBits / WidestType;
6933 }
6934 
6935 VectorizationFactor
6936 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
6937   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
6938   ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build VPlan
  // upfront in the vectorization pipeline.
6943   if (!OrigLoop->isInnermost()) {
6944     // If the user doesn't provide a vectorization factor, determine a
6945     // reasonable one.
6946     if (UserVF.isZero()) {
6947       VF = ElementCount::getFixed(
6948           determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM));
6949       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
6950 
6951       // Make sure we have a VF > 1 for stress testing.
6952       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
6953         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
6954                           << "overriding computed VF.\n");
6955         VF = ElementCount::getFixed(4);
6956       }
6957     }
6958     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6959     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
6960            "VF needs to be a power of two");
6961     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
6962                       << "VF " << VF << " to build VPlans.\n");
6963     buildVPlans(VF.getKnownMinValue(), VF.getKnownMinValue());
6964 
6965     // For VPlan build stress testing, we bail out after VPlan construction.
6966     if (VPlanBuildStressTest)
6967       return VectorizationFactor::Disabled();
6968 
6969     return {VF, 0 /*Cost*/};
6970   }
6971 
6972   LLVM_DEBUG(
6973       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6974                 "VPlan-native path.\n");
6975   return VectorizationFactor::Disabled();
6976 }
6977 
6978 Optional<VectorizationFactor>
6979 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
6980   assert(!UserVF.isScalable() && "scalable vectorization not yet handled");
6981   assert(OrigLoop->isInnermost() && "Inner loop expected.");
6982   Optional<unsigned> MaybeMaxVF =
6983       CM.computeMaxVF(UserVF.getKnownMinValue(), UserIC);
  if (!MaybeMaxVF) // Cases that should not be vectorized or interleaved.
6985     return None;
6986 
6987   // Invalidate interleave groups if all blocks of loop will be predicated.
6988   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
6989       !useMaskedInterleavedAccesses(*TTI)) {
6990     LLVM_DEBUG(
6991         dbgs()
6992         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6993            "which requires masked-interleaved support.\n");
6994     if (CM.InterleaveInfo.invalidateGroups())
6995       // Invalidating interleave groups also requires invalidating all decisions
6996       // based on them, which includes widening decisions and uniform and scalar
6997       // values.
6998       CM.invalidateCostModelingDecisions();
6999   }
7000 
7001   if (!UserVF.isZero()) {
7002     LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
7003     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
7004            "VF needs to be a power of two");
7005     // Collect the instructions (and their associated costs) that will be more
7006     // profitable to scalarize.
7007     CM.selectUserVectorizationFactor(UserVF);
7008     CM.collectInLoopReductions();
7009     buildVPlansWithVPRecipes(UserVF.getKnownMinValue(),
7010                              UserVF.getKnownMinValue());
7011     LLVM_DEBUG(printPlans(dbgs()));
7012     return {{UserVF, 0}};
7013   }
7014 
7015   unsigned MaxVF = MaybeMaxVF.getValue();
7016   assert(MaxVF != 0 && "MaxVF is zero.");
7017 
7018   for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
7019     // Collect Uniform and Scalar instructions after vectorization with VF.
7020     CM.collectUniformsAndScalars(ElementCount::getFixed(VF));
7021 
7022     // Collect the instructions (and their associated costs) that will be more
7023     // profitable to scalarize.
7024     if (VF > 1)
7025       CM.collectInstsToScalarize(ElementCount::getFixed(VF));
7026   }
7027 
7028   CM.collectInLoopReductions();
7029 
7030   buildVPlansWithVPRecipes(1, MaxVF);
7031   LLVM_DEBUG(printPlans(dbgs()));
7032   if (MaxVF == 1)
7033     return VectorizationFactor::Disabled();
7034 
7035   // Select the optimal vectorization factor.
7036   return CM.selectVectorizationFactor(MaxVF);
7037 }
7038 
7039 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) {
7040   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
7041                     << '\n');
7042   BestVF = VF;
7043   BestUF = UF;
7044 
7045   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
7046     return !Plan->hasVF(VF);
7047   });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
7049 }
7050 
7051 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
7052                                            DominatorTree *DT) {
7053   // Perform the actual loop transformation.
7054 
7055   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7056   VPCallbackILV CallbackILV(ILV);
7057 
7058   assert(BestVF.hasValue() && "Vectorization Factor is missing");
7059 
7060   VPTransformState State{*BestVF, BestUF,      LI,
7061                          DT,      ILV.Builder, ILV.VectorLoopValueMap,
7062                          &ILV,    CallbackILV};
7063   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7064   State.TripCount = ILV.getOrCreateTripCount(nullptr);
7065   State.CanonicalIV = ILV.Induction;
7066 
7067   //===------------------------------------------------===//
7068   //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
7072   //
7073   //===------------------------------------------------===//
7074 
7075   // 2. Copy and widen instructions from the old loop into the new loop.
7076   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
7077   VPlans.front()->execute(&State);
7078 
7079   // 3. Fix the vectorized code: take care of header phi's, live-outs,
7080   //    predication, updating analyses.
7081   ILV.fixVectorizedLoop();
7082 }
7083 
7084 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
7085     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
7086   BasicBlock *Latch = OrigLoop->getLoopLatch();
7087 
7088   // We create new control-flow for the vectorized loop, so the original
7089   // condition will be dead after vectorization if it's only used by the
7090   // branch.
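  // For example, an exit condition such as "%exitcond = icmp eq i64 %iv.next,
  // %n" (hypothetical names) that only feeds the latch branch will be dead
  // once the new loop control flow is in place.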
7091   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
7092   if (Cmp && Cmp->hasOneUse()) {
7093     DeadInstructions.insert(Cmp);
7094 
    // An operand of the icmp is often a dead trunc, used by IndUpdate.
    for (Value *Op : Cmp->operands()) {
      if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
    }
7100   }
7101 
7102   // We create new "steps" for induction variable updates to which the original
7103   // induction variables map. An original update instruction will be dead if
7104   // all its users except the induction variable are dead.
7105   for (auto &Induction : Legal->getInductionVars()) {
7106     PHINode *Ind = Induction.first;
7107     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
7108     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
7109           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
7110         }))
7111       DeadInstructions.insert(IndUpdate);
7112 
    // We also record as "Dead" the type-casting instructions we had identified
    // during induction analysis. We don't need any handling for them in the
    // vectorized loop because we have proven that, under a proper runtime test
    // guarding the vectorized loop, the value of the phi, and the casted value
    // of the phi, are the same. The last instruction in this casting chain will
    // get its scalar/vector/widened def from the scalar/vector/widened def of
    // the respective phi node. Any other casts in the induction def-use chain
    // have no other uses outside the phi update chain, and will be ignored.
7121     InductionDescriptor &IndDes = Induction.second;
7122     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7123     DeadInstructions.insert(Casts.begin(), Casts.end());
7124   }
7125 }
7126 
7127 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
7128 
7129 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
7130 
7131 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
7132                                         Instruction::BinaryOps BinOp) {
7133   // When unrolling and the VF is 1, we only need to add a simple scalar.
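  // E.g., for StartIdx == 3 this computes Val + 3 * Step.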
7134   Type *Ty = Val->getType();
7135   assert(!Ty->isVectorTy() && "Val must be a scalar");
7136 
7137   if (Ty->isFloatingPointTy()) {
7138     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
7139 
7140     // Floating point operations had to be 'fast' to enable the unrolling.
7141     Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
7142     return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
7143   }
7144   Constant *C = ConstantInt::get(Ty, StartIdx);
7145   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
7146 }
7147 
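// Mark \p L so the runtime unroller leaves it alone, by appending loop
// metadata of the form (eliding any pre-existing operands):
//   !0 = distinct !{!0, ..., !1}
//   !1 = !{!"llvm.loop.unroll.runtime.disable"}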
7148 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
7149   SmallVector<Metadata *, 4> MDs;
7150   // Reserve first location for self reference to the LoopID metadata node.
7151   MDs.push_back(nullptr);
7152   bool IsUnrollMetadata = false;
7153   MDNode *LoopID = L->getLoopID();
7154   if (LoopID) {
7155     // First find existing loop unrolling disable metadata.
7156     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
7157       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
7158       if (MD) {
7159         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
7160         IsUnrollMetadata =
7161             S && S->getString().startswith("llvm.loop.unroll.disable");
7162       }
7163       MDs.push_back(LoopID->getOperand(i));
7164     }
7165   }
7166 
7167   if (!IsUnrollMetadata) {
7168     // Add runtime unroll disable metadata.
7169     LLVMContext &Context = L->getHeader()->getContext();
7170     SmallVector<Metadata *, 1> DisableOperands;
7171     DisableOperands.push_back(
7172         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
7173     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
7174     MDs.push_back(DisableNode);
7175     MDNode *NewLoopID = MDNode::get(Context, MDs);
7176     // Set operand 0 to refer to the loop id itself.
7177     NewLoopID->replaceOperandWith(0, NewLoopID);
7178     L->setLoopID(NewLoopID);
7179   }
7180 }
7181 
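/// For example, if \p Range is [1, 16) and \p Predicate holds for VF 1 and
/// VF 2 but not for VF 4, then Range.End is clamped to 4 and true is
/// returned: all VFs remaining in [1, 4) share the same decision.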
7182 bool LoopVectorizationPlanner::getDecisionAndClampRange(
7183     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
7184   assert(Range.End > Range.Start && "Trying to test an empty VF range.");
7185   bool PredicateAtRangeStart = Predicate(ElementCount::getFixed(Range.Start));
7186 
7187   for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2)
7188     if (Predicate(ElementCount::getFixed(TmpVF)) != PredicateAtRangeStart) {
7189       Range.End = TmpVF;
7190       break;
7191     }
7192 
7193   return PredicateAtRangeStart;
7194 }
7195 
/// Build VPlans for the full range of feasible VFs = {\p MinVF, 2 * \p MinVF,
/// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
/// of VFs starting at a given VF and extending it as much as possible. Each
/// vectorization decision can potentially shorten this sub-range during
/// buildVPlan().
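/// For example, with MinVF = 1 and MaxVF = 8 the first call starts from the
/// sub-range [1, 9); if a decision differs at VF = 4 the sub-range is clamped
/// to [1, 4), one VPlan is built covering VFs {1, 2}, and the next iteration
/// resumes at VF = 4.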
7201 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) {
7202   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
7203     VFRange SubRange = {VF, MaxVF + 1};
7204     VPlans.push_back(buildVPlan(SubRange));
7205     VF = SubRange.End;
7206   }
7207 }
7208 
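// For example, for an edge Src -> Dst where Src terminates in
//   br i1 %cond, label %Dst, label %Other
// the edge mask is SrcMask & %cond, or SrcMask & !%cond if Dst is the false
// successor; a null SrcMask represents an all-one mask, and the AND is then
// skipped.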
7209 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
7210                                          VPlanPtr &Plan) {
7211   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
7212 
7213   // Look for cached value.
7214   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
7215   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
7216   if (ECEntryIt != EdgeMaskCache.end())
7217     return ECEntryIt->second;
7218 
7219   VPValue *SrcMask = createBlockInMask(Src, Plan);
7220 
7221   // The terminator has to be a branch inst!
7222   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
7223   assert(BI && "Unexpected terminator found");
7224 
7225   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
7226     return EdgeMaskCache[Edge] = SrcMask;
7227 
7228   VPValue *EdgeMask = Plan->getVPValue(BI->getCondition());
7229   assert(EdgeMask && "No Edge Mask found for condition");
7230 
7231   if (BI->getSuccessor(0) != Dst)
7232     EdgeMask = Builder.createNot(EdgeMask);
7233 
7234   if (SrcMask) // Otherwise block in-mask is all-one, no need to AND.
7235     EdgeMask = Builder.createAnd(EdgeMask, SrcMask);
7236 
7237   return EdgeMaskCache[Edge] = EdgeMask;
7238 }
7239 
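// Compute and cache the mask of block \p BB: a nullptr "all-one" mask when no
// predication is needed, the early-exit header mask when \p BB is the loop
// header under tail folding, and otherwise the OR of the masks of all
// incoming edges.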
7240 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
7241   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
7242 
7243   // Look for cached value.
7244   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
7245   if (BCEntryIt != BlockMaskCache.end())
7246     return BCEntryIt->second;
7247 
7248   // All-one mask is modelled as no-mask following the convention for masked
7249   // load/store/gather/scatter. Initialize BlockMask to no-mask.
7250   VPValue *BlockMask = nullptr;
7251 
7252   if (OrigLoop->getHeader() == BB) {
7253     if (!CM.blockNeedsPredication(BB))
7254       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
7255 
7256     // Create the block in mask as the first non-phi instruction in the block.
7257     VPBuilder::InsertPointGuard Guard(Builder);
7258     auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
7259     Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);
7260 
7261     // Introduce the early-exit compare IV <= BTC to form header block mask.
7262     // This is used instead of IV < TC because TC may wrap, unlike BTC.
7263     // Start by constructing the desired canonical IV.
7264     VPValue *IV = nullptr;
7265     if (Legal->getPrimaryInduction())
7266       IV = Plan->getVPValue(Legal->getPrimaryInduction());
7267     else {
7268       auto IVRecipe = new VPWidenCanonicalIVRecipe();
7269       Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
7270       IV = IVRecipe->getVPValue();
7271     }
7272     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
7273     bool TailFolded = !CM.isScalarEpilogueAllowed();
7274 
7275     if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
      // While ActiveLaneMask is a binary op that consumes the loop trip count
      // as its second argument, we only pass the IV here and extract the trip
      // count from the transform state, where codegen of the VP instructions
      // happens.
7280       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
7281     } else {
7282       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
7283     }
7284     return BlockMaskCache[BB] = BlockMask;
7285   }
7286 
  // This is the block mask. We OR the masks of all incoming edges.
7288   for (auto *Predecessor : predecessors(BB)) {
7289     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
7290     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
7291       return BlockMaskCache[BB] = EdgeMask;
7292 
7293     if (!BlockMask) { // BlockMask has its initialized nullptr value.
7294       BlockMask = EdgeMask;
7295       continue;
7296     }
7297 
7298     BlockMask = Builder.createOr(BlockMask, EdgeMask);
7299   }
7300 
7301   return BlockMaskCache[BB] = BlockMask;
7302 }
7303 
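// Check via the cost model whether the load or store \p I should be widened
// for the VF range, and if so build a VPWidenMemoryInstructionRecipe for it,
// attaching a block-in mask when masking is required. Returns nullptr when
// the decision for the (clamped) range is to scalarize or interleave instead.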
7304 VPWidenMemoryInstructionRecipe *
7305 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range,
7306                                   VPlanPtr &Plan) {
7307   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7308          "Must be called with either a load or store");
7309 
7310   auto willWiden = [&](ElementCount VF) -> bool {
7311     assert(!VF.isScalable() && "unexpected scalable ElementCount");
7312     if (VF.isScalar())
7313       return false;
7314     LoopVectorizationCostModel::InstWidening Decision =
7315         CM.getWideningDecision(I, VF);
7316     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
7317            "CM decision should be taken at this point.");
7318     if (Decision == LoopVectorizationCostModel::CM_Interleave)
7319       return true;
7320     if (CM.isScalarAfterVectorization(I, VF) ||
7321         CM.isProfitableToScalarize(I, VF))
7322       return false;
7323     return Decision != LoopVectorizationCostModel::CM_Scalarize;
7324   };
7325 
7326   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
7327     return nullptr;
7328 
7329   VPValue *Mask = nullptr;
7330   if (Legal->isMaskRequired(I))
7331     Mask = createBlockInMask(I->getParent(), Plan);
7332 
7333   VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I));
7334   if (LoadInst *Load = dyn_cast<LoadInst>(I))
7335     return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask);
7336 
7337   StoreInst *Store = cast<StoreInst>(I);
7338   VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand());
7339   return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask);
7340 }
7341 
7342 VPWidenIntOrFpInductionRecipe *
7343 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi) const {
7344   // Check if this is an integer or fp induction. If so, build the recipe that
7345   // produces its scalar and vector values.
7346   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
7347   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
7348       II.getKind() == InductionDescriptor::IK_FpInduction)
7349     return new VPWidenIntOrFpInductionRecipe(Phi);
7350 
7351   return nullptr;
7352 }
7353 
7354 VPWidenIntOrFpInductionRecipe *
7355 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I,
7356                                                 VFRange &Range) const {
7357   // Optimize the special case where the source is a constant integer
7358   // induction variable. Notice that we can only optimize the 'trunc' case
7359   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
7360   // (c) other casts depend on pointer size.
7361 
7362   // Determine whether \p K is a truncation based on an induction variable that
7363   // can be optimized.
7364   auto isOptimizableIVTruncate =
7365       [&](Instruction *K) -> std::function<bool(ElementCount)> {
7366     return [=](ElementCount VF) -> bool {
7367       return CM.isOptimizableIVTruncate(K, VF);
7368     };
7369   };
7370 
7371   if (LoopVectorizationPlanner::getDecisionAndClampRange(
7372           isOptimizableIVTruncate(I), Range))
7373     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
7374                                              I);
7375   return nullptr;
7376 }
7377 
7378 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) {
7379   // We know that all PHIs in non-header blocks are converted into selects, so
7380   // we don't have to worry about the insertion order and we can just use the
7381   // builder. At this point we generate the predication tree. There may be
7382   // duplications since this is a simple recursive scan, but future
7383   // optimizations will clean it up.
7384 
7385   SmallVector<VPValue *, 2> Operands;
7386   unsigned NumIncoming = Phi->getNumIncomingValues();
7387   for (unsigned In = 0; In < NumIncoming; In++) {
7388     VPValue *EdgeMask =
7389       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
7390     assert((EdgeMask || NumIncoming == 1) &&
7391            "Multiple predecessors with one having a full mask");
7392     Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In)));
7393     if (EdgeMask)
7394       Operands.push_back(EdgeMask);
7395   }
7396   return new VPBlendRecipe(Phi, Operands);
7397 }
7398 
7399 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range,
7400                                                    VPlan &Plan) const {
7401 
7402   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
7403       [this, CI](ElementCount VF) {
7404         return CM.isScalarWithPredication(CI, VF);
7405       },
7406       Range);
7407 
7408   if (IsPredicated)
7409     return nullptr;
7410 
7411   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
7412   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
7413              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect))
7414     return nullptr;
7415 
7416   auto willWiden = [&](ElementCount VF) -> bool {
7417     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The following case may be scalarized depending on the VF.
    // The flag shows whether we use an intrinsic or a plain call for the
    // vectorized version of the instruction: is it beneficial to perform the
    // intrinsic call compared to the lib call?
7422     bool NeedToScalarize = false;
7423     unsigned CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
7424     bool UseVectorIntrinsic =
7425         ID && CM.getVectorIntrinsicCost(CI, VF) <= CallCost;
7426     return UseVectorIntrinsic || !NeedToScalarize;
7427   };
7428 
7429   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
7430     return nullptr;
7431 
7432   return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands()));
7433 }
7434 
7435 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
7436   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
7437          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
  // The instruction should be widened, unless it is scalar after
  // vectorization, scalarization is profitable, or it is predicated.
7440   auto WillScalarize = [this, I](ElementCount VF) -> bool {
7441     return CM.isScalarAfterVectorization(I, VF) ||
7442            CM.isProfitableToScalarize(I, VF) ||
7443            CM.isScalarWithPredication(I, VF);
7444   };
7445   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
7446                                                              Range);
7447 }
7448 
7449 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const {
7450   auto IsVectorizableOpcode = [](unsigned Opcode) {
7451     switch (Opcode) {
7452     case Instruction::Add:
7453     case Instruction::And:
7454     case Instruction::AShr:
7455     case Instruction::BitCast:
7456     case Instruction::FAdd:
7457     case Instruction::FCmp:
7458     case Instruction::FDiv:
7459     case Instruction::FMul:
7460     case Instruction::FNeg:
7461     case Instruction::FPExt:
7462     case Instruction::FPToSI:
7463     case Instruction::FPToUI:
7464     case Instruction::FPTrunc:
7465     case Instruction::FRem:
7466     case Instruction::FSub:
7467     case Instruction::ICmp:
7468     case Instruction::IntToPtr:
7469     case Instruction::LShr:
7470     case Instruction::Mul:
7471     case Instruction::Or:
7472     case Instruction::PtrToInt:
7473     case Instruction::SDiv:
7474     case Instruction::Select:
7475     case Instruction::SExt:
7476     case Instruction::Shl:
7477     case Instruction::SIToFP:
7478     case Instruction::SRem:
7479     case Instruction::Sub:
7480     case Instruction::Trunc:
7481     case Instruction::UDiv:
7482     case Instruction::UIToFP:
7483     case Instruction::URem:
7484     case Instruction::Xor:
7485     case Instruction::ZExt:
7486       return true;
7487     }
7488     return false;
7489   };
7490 
7491   if (!IsVectorizableOpcode(I->getOpcode()))
7492     return nullptr;
7493 
7494   // Success: widen this instruction.
7495   return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands()));
7496 }
7497 
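// Build a VPReplicateRecipe for \p I, replicating it once per lane and part,
// or only once per part when it is uniform after vectorization. A predicated
// instruction is additionally wrapped in an if-then replicate region, in
// which case the region's successor block is returned instead of \p VPBB.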
7498 VPBasicBlock *VPRecipeBuilder::handleReplication(
7499     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
7500     DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
7501     VPlanPtr &Plan) {
7502   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
7503       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
7504       Range);
7505 
7506   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
7507       [&](ElementCount VF) { return CM.isScalarWithPredication(I, VF); },
7508       Range);
7509 
7510   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
7511                                        IsUniform, IsPredicated);
7512   setRecipe(I, Recipe);
7513 
7514   // Find if I uses a predicated instruction. If so, it will use its scalar
7515   // value. Avoid hoisting the insert-element which packs the scalar value into
7516   // a vector value, as that happens iff all users use the vector value.
7517   for (auto &Op : I->operands())
7518     if (auto *PredInst = dyn_cast<Instruction>(Op))
7519       if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
7520         PredInst2Recipe[PredInst]->setAlsoPack(false);
7521 
7522   // Finalize the recipe for Instr, first if it is not predicated.
7523   if (!IsPredicated) {
7524     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
7525     VPBB->appendRecipe(Recipe);
7526     return VPBB;
7527   }
7528   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
7529   assert(VPBB->getSuccessors().empty() &&
7530          "VPBB has successors when handling predicated replication.");
7531   // Record predicated instructions for above packing optimizations.
7532   PredInst2Recipe[I] = Recipe;
7533   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
7534   VPBlockUtils::insertBlockAfter(Region, VPBB);
7535   auto *RegSucc = new VPBasicBlock();
7536   VPBlockUtils::insertBlockAfter(RegSucc, Region);
7537   return RegSucc;
7538 }
7539 
7540 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
7541                                                       VPRecipeBase *PredRecipe,
7542                                                       VPlanPtr &Plan) {
7543   // Instructions marked for predication are replicated and placed under an
7544   // if-then construct to prevent side-effects.
7545 
7546   // Generate recipes to compute the block mask for this region.
7547   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
7548 
7549   // Build the triangular if-then region.
7550   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
7551   assert(Instr->getParent() && "Predicated instruction not in any basic block");
7552   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
7553   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
7554   auto *PHIRecipe =
7555       Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
7556   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
7557   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
7558   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
7559 
7560   // Note: first set Entry as region entry and then connect successors starting
7561   // from it in order, to propagate the "parent" of each VPBasicBlock.
7562   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
7563   VPBlockUtils::connectBlocks(Pred, Exit);
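  // The region now forms a triangle: Entry branches on the mask either to
  // Pred or directly to Exit, Pred falls through to Exit, and Exit's phi (if
  // any) merges the predicated value back into the main flow.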
7564 
7565   return Region;
7566 }
7567 
7568 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
7569                                                       VFRange &Range,
7570                                                       VPlanPtr &Plan) {
7571   // First, check for specific widening recipes that deal with calls, memory
7572   // operations, inductions and Phi nodes.
7573   if (auto *CI = dyn_cast<CallInst>(Instr))
7574     return tryToWidenCall(CI, Range, *Plan);
7575 
7576   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
7577     return tryToWidenMemory(Instr, Range, Plan);
7578 
7579   VPRecipeBase *Recipe;
7580   if (auto Phi = dyn_cast<PHINode>(Instr)) {
7581     if (Phi->getParent() != OrigLoop->getHeader())
7582       return tryToBlend(Phi, Plan);
7583     if ((Recipe = tryToOptimizeInductionPHI(Phi)))
7584       return Recipe;
7585     return new VPWidenPHIRecipe(Phi);
7586   }
7587 
7588   if (isa<TruncInst>(Instr) &&
7589       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Range)))
7590     return Recipe;
7591 
7592   if (!shouldWiden(Instr, Range))
7593     return nullptr;
7594 
7595   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
7596     return new VPWidenGEPRecipe(GEP, Plan->mapToVPValues(GEP->operands()),
7597                                 OrigLoop);
7598 
7599   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
7600     bool InvariantCond =
7601         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
7602     return new VPWidenSelectRecipe(*SI, Plan->mapToVPValues(SI->operands()),
7603                                    InvariantCond);
7604   }
7605 
7606   return tryToWiden(Instr, *Plan);
7607 }
7608 
7609 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF,
7610                                                         unsigned MaxVF) {
7611   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7612 
7613   // Collect conditions feeding internal conditional branches; they need to be
7614   // represented in VPlan for it to model masking.
7615   SmallPtrSet<Value *, 1> NeedDef;
7616 
7617   auto *Latch = OrigLoop->getLoopLatch();
7618   for (BasicBlock *BB : OrigLoop->blocks()) {
7619     if (BB == Latch)
7620       continue;
7621     BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
7622     if (Branch && Branch->isConditional())
7623       NeedDef.insert(Branch->getCondition());
7624   }
7625 
  // If the tail is to be folded by masking, the primary induction variable,
  // if it exists, needs to be represented in VPlan to model early-exit masking.
7628   // Also, both the Phi and the live-out instruction of each reduction are
7629   // required in order to introduce a select between them in VPlan.
7630   if (CM.foldTailByMasking()) {
7631     if (Legal->getPrimaryInduction())
7632       NeedDef.insert(Legal->getPrimaryInduction());
7633     for (auto &Reduction : Legal->getReductionVars()) {
7634       NeedDef.insert(Reduction.first);
7635       NeedDef.insert(Reduction.second.getLoopExitInstr());
7636     }
7637   }
7638 
7639   // Collect instructions from the original loop that will become trivially dead
7640   // in the vectorized loop. We don't need to vectorize these instructions. For
7641   // example, original induction update instructions can become dead because we
7642   // separately emit induction "steps" when generating code for the new loop.
7643   // Similarly, we create a new latch condition when setting up the structure
7644   // of the new loop, so the old one can become dead.
7645   SmallPtrSet<Instruction *, 4> DeadInstructions;
7646   collectTriviallyDeadInstructions(DeadInstructions);
7647 
7648   // Add assume instructions we need to drop to DeadInstructions, to prevent
7649   // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  // control flow is preserved, we should keep them.
7652   auto &ConditionalAssumes = Legal->getConditionalAssumes();
7653   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
7654 
7655   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
7656   // Dead instructions do not need sinking. Remove them from SinkAfter.
7657   for (Instruction *I : DeadInstructions)
7658     SinkAfter.erase(I);
7659 
  for (unsigned VF = MinVF; VF <= MaxVF;) {
7661     VFRange SubRange = {VF, MaxVF + 1};
7662     VPlans.push_back(buildVPlanWithVPRecipes(SubRange, NeedDef,
7663                                              DeadInstructions, SinkAfter));
7664     VF = SubRange.End;
7665   }
7666 }
7667 
7668 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
7669     VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef,
7670     SmallPtrSetImpl<Instruction *> &DeadInstructions,
7671     const DenseMap<Instruction *, Instruction *> &SinkAfter) {
7672 
7673   // Hold a mapping from predicated instructions to their recipes, in order to
7674   // fix their AlsoPack behavior if a user is determined to replicate and use a
7675   // scalar instead of vector value.
7676   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
7677 
7678   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
7679 
7680   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
7681 
7682   // ---------------------------------------------------------------------------
7683   // Pre-construction: record ingredients whose recipes we'll need to further
7684   // process after constructing the initial VPlan.
7685   // ---------------------------------------------------------------------------
7686 
7687   // Mark instructions we'll need to sink later and their targets as
7688   // ingredients whose recipe we'll need to record.
7689   for (auto &Entry : SinkAfter) {
7690     RecipeBuilder.recordRecipeOf(Entry.first);
7691     RecipeBuilder.recordRecipeOf(Entry.second);
7692   }
7693   for (auto &Reduction : CM.getInLoopReductionChains()) {
7694     PHINode *Phi = Reduction.first;
7695     RecurrenceDescriptor::RecurrenceKind Kind =
7696         Legal->getReductionVars()[Phi].getRecurrenceKind();
7697     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
7698 
7699     RecipeBuilder.recordRecipeOf(Phi);
7700     for (auto &R : ReductionOperations) {
7701       RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
      // need to record the ICmp recipe, so it can be removed later.
7704       if (Kind == RecurrenceDescriptor::RK_IntegerMinMax ||
7705           Kind == RecurrenceDescriptor::RK_FloatMinMax) {
7706         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
7707       }
7708     }
7709   }
7710 
7711   // For each interleave group which is relevant for this (possibly trimmed)
7712   // Range, add it to the set of groups to be later applied to the VPlan and add
7713   // placeholders for its members' Recipes which we'll be replacing with a
7714   // single VPInterleaveRecipe.
7715   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
7716     auto applyIG = [IG, this](ElementCount VF) -> bool {
7717       return (VF.isVector() && // Query is illegal for VF == 1
7718               CM.getWideningDecision(IG->getInsertPos(), VF) ==
7719                   LoopVectorizationCostModel::CM_Interleave);
7720     };
7721     if (!getDecisionAndClampRange(applyIG, Range))
7722       continue;
7723     InterleaveGroups.insert(IG);
7724     for (unsigned i = 0; i < IG->getFactor(); i++)
7725       if (Instruction *Member = IG->getMember(i))
7726         RecipeBuilder.recordRecipeOf(Member);
7727   };
7728 
7729   // ---------------------------------------------------------------------------
7730   // Build initial VPlan: Scan the body of the loop in a topological order to
7731   // visit each basic block after having visited its predecessor basic blocks.
7732   // ---------------------------------------------------------------------------
7733 
7734   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
7735   auto Plan = std::make_unique<VPlan>();
7736   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
7737   Plan->setEntry(VPBB);
7738 
7739   // Represent values that will have defs inside VPlan.
7740   for (Value *V : NeedDef)
7741     Plan->addVPValue(V);
7742 
7743   // Scan the body of the loop in a topological order to visit each basic block
7744   // after having visited its predecessor basic blocks.
7745   LoopBlocksDFS DFS(OrigLoop);
7746   DFS.perform(LI);
7747 
7748   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients, which fill a new VPBasicBlock.
7751     unsigned VPBBsForBB = 0;
7752     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
7753     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
7754     VPBB = FirstVPBBForBB;
7755     Builder.setInsertPoint(VPBB);
7756 
7757     // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
7759     for (Instruction &I : BB->instructionsWithoutDebug()) {
7760       Instruction *Instr = &I;
7761 
7762       // First filter out irrelevant instructions, to ensure no recipes are
7763       // built for them.
7764       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
7765         continue;
7766 
7767       if (auto Recipe =
7768               RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
7769         // Check if the recipe can be converted to a VPValue. We need the extra
7770         // down-casting step until VPRecipeBase inherits from VPValue.
7771         VPValue *MaybeVPValue = Recipe->toVPValue();
7772         if (!Instr->getType()->isVoidTy() && MaybeVPValue) {
7773           if (NeedDef.contains(Instr))
7774             Plan->addOrReplaceVPValue(Instr, MaybeVPValue);
7775           else
7776             Plan->addVPValue(Instr, MaybeVPValue);
7777         }
7778 
7779         RecipeBuilder.setRecipe(Instr, Recipe);
7780         VPBB->appendRecipe(Recipe);
7781         continue;
7782       }
7783 
7784       // Otherwise, if all widening options failed, Instruction is to be
7785       // replicated. This may create a successor for VPBB.
7786       VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
7787           Instr, Range, VPBB, PredInst2Recipe, Plan);
7788       if (NextVPBB != VPBB) {
7789         VPBB = NextVPBB;
7790         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
7791                                     : "");
7792       }
7793     }
7794   }
7795 
  // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
  // may also be empty, such as the last one (VPBB), reflecting original
  // basic blocks with no recipes.
7799   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
7800   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
7801   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
7802   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
7803   delete PreEntry;
7804 
7805   // ---------------------------------------------------------------------------
7806   // Transform initial VPlan: Apply previously taken decisions, in order, to
7807   // bring the VPlan to its final state.
7808   // ---------------------------------------------------------------------------
7809 
7810   // Apply Sink-After legal constraints.
7811   for (auto &Entry : SinkAfter) {
7812     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
7813     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
7814     Sink->moveAfter(Target);
7815   }
7816 
7817   // Interleave memory: for each Interleave Group we marked earlier as relevant
7818   // for this VPlan, replace the Recipes widening its memory instructions with a
7819   // single VPInterleaveRecipe at its insertion point.
7820   for (auto IG : InterleaveGroups) {
7821     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
7822         RecipeBuilder.getRecipe(IG->getInsertPos()));
7823     (new VPInterleaveRecipe(IG, Recipe->getAddr(), Recipe->getMask()))
7824         ->insertBefore(Recipe);
7825 
7826     for (unsigned i = 0; i < IG->getFactor(); ++i)
7827       if (Instruction *Member = IG->getMember(i)) {
7828         if (!Member->getType()->isVoidTy()) {
7829           VPValue *OriginalV = Plan->getVPValue(Member);
7830           Plan->removeVPValueFor(Member);
7831           OriginalV->replaceAllUsesWith(Plan->getOrAddVPValue(Member));
7832         }
7833         RecipeBuilder.getRecipe(Member)->eraseFromParent();
7834       }
7835   }
7836 
7837   // Adjust the recipes for any inloop reductions.
7838   if (Range.Start > 1)
7839     adjustRecipesForInLoopReductions(Plan, RecipeBuilder);
7840 
7841   // Finally, if tail is folded by masking, introduce selects between the phi
7842   // and the live-out instruction of each reduction, at the end of the latch.
7843   if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) {
7844     Builder.setInsertPoint(VPBB);
7845     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
7846     for (auto &Reduction : Legal->getReductionVars()) {
7847       if (CM.isInLoopReduction(Reduction.first))
7848         continue;
7849       VPValue *Phi = Plan->getVPValue(Reduction.first);
7850       VPValue *Red = Plan->getVPValue(Reduction.second.getLoopExitInstr());
7851       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
7852     }
7853   }
7854 
7855   std::string PlanName;
7856   raw_string_ostream RSO(PlanName);
7857   ElementCount VF = ElementCount::getFixed(Range.Start);
7858   Plan->addVF(VF);
7859   RSO << "Initial VPlan for VF={" << VF;
7860   for (VF *= 2; VF.getKnownMinValue() < Range.End; VF *= 2) {
7861     Plan->addVF(VF);
7862     RSO << "," << VF;
7863   }
7864   RSO << "},UF>=1";
7865   RSO.flush();
7866   Plan->setName(PlanName);
7867 
7868   return Plan;
7869 }
7870 
7871 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
7872   // Outer loop handling: They may require CFG and instruction level
7873   // transformations before even evaluating whether vectorization is profitable.
7874   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7875   // the vectorization pipeline.
7876   assert(!OrigLoop->isInnermost());
7877   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7878 
7879   // Create new empty VPlan
7880   auto Plan = std::make_unique<VPlan>();
7881 
7882   // Build hierarchical CFG
7883   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
7884   HCFGBuilder.buildHierarchicalCFG();
7885 
7886   for (unsigned VF = Range.Start; VF < Range.End; VF *= 2)
7887     Plan->addVF(ElementCount::getFixed(VF));
7888 
7889   if (EnableVPlanPredication) {
7890     VPlanPredicator VPP(*Plan);
7891     VPP.predicate();
7892 
7893     // Avoid running transformation to recipes until masked code generation in
7894     // VPlan-native path is in place.
7895     return Plan;
7896   }
7897 
7898   SmallPtrSet<Instruction *, 1> DeadInstructions;
7899   VPlanTransforms::VPInstructionsToVPRecipes(
7900       OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
7901   return Plan;
7902 }
7903 
7904 // Adjust the recipes for any inloop reductions. The chain of instructions
7905 // leading from the loop exit instr to the phi need to be converted to
7906 // reductions, with one operand being vector and the other being the scalar
7907 // reduction chain.
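// For example (illustrative IR), given the in-loop integer add reduction
//   %red = add i32 %phi, %mul
// the widened recipe for %red is replaced by a VPReductionRecipe that
// target-reduces the vectorized %mul operand and adds the scalar result to
// the incoming chain value %phi.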
7908 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
7909     VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
7910   for (auto &Reduction : CM.getInLoopReductionChains()) {
7911     PHINode *Phi = Reduction.first;
7912     RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
7913     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
7914 
    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For minmax the chain will be the select instructions.
7919     Instruction *Chain = Phi;
7920     for (Instruction *R : ReductionOperations) {
7921       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
7922       RecurrenceDescriptor::RecurrenceKind Kind = RdxDesc.getRecurrenceKind();
7923 
7924       VPValue *ChainOp = Plan->getVPValue(Chain);
7925       unsigned FirstOpId;
7926       if (Kind == RecurrenceDescriptor::RK_IntegerMinMax ||
7927           Kind == RecurrenceDescriptor::RK_FloatMinMax) {
7928         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
7929                "Expected to replace a VPWidenSelectSC");
7930         FirstOpId = 1;
7931       } else {
7932         assert(isa<VPWidenRecipe>(WidenRecipe) &&
7933                "Expected to replace a VPWidenSC");
7934         FirstOpId = 0;
7935       }
7936       unsigned VecOpId =
7937           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
7938       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
7939 
7940       auto *CondOp = CM.foldTailByMasking()
7941                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
7942                          : nullptr;
7943       VPReductionRecipe *RedRecipe = new VPReductionRecipe(
7944           &RdxDesc, R, ChainOp, VecOp, CondOp, Legal->hasFunNoNaNAttr(), TTI);
7945       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
7946       WidenRecipe->eraseFromParent();
7947 
7948       if (Kind == RecurrenceDescriptor::RK_IntegerMinMax ||
7949           Kind == RecurrenceDescriptor::RK_FloatMinMax) {
7950         VPRecipeBase *CompareRecipe =
7951             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
7952         assert(isa<VPWidenRecipe>(CompareRecipe) &&
7953                "Expected to replace a VPWidenSC");
7954         CompareRecipe->eraseFromParent();
7955       }
7956       Chain = R;
7957     }
7958   }
7959 }
7960 
Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateVectorValues(
    Value *V, unsigned Part) {
  return ILV.getOrCreateVectorValue(V, Part);
}
7965 
7966 Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue(
7967     Value *V, const VPIteration &Instance) {
7968   return ILV.getOrCreateScalarValue(V, Instance);
7969 }
7970 
7971 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
7972                                VPSlotTracker &SlotTracker) const {
7973   O << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
7974   IG->getInsertPos()->printAsOperand(O, false);
7975   O << ", ";
7976   getAddr()->printAsOperand(O, SlotTracker);
7977   VPValue *Mask = getMask();
7978   if (Mask) {
7979     O << ", ";
7980     Mask->printAsOperand(O, SlotTracker);
7981   }
7982   for (unsigned i = 0; i < IG->getFactor(); ++i)
7983     if (Instruction *I = IG->getMember(i))
7984       O << "\\l\" +\n" << Indent << "\"  " << VPlanIngredient(I) << " " << i;
7985 }
7986 
7987 void VPWidenCallRecipe::execute(VPTransformState &State) {
7988   State.ILV->widenCallInstruction(Ingredient, *this, State);
7989 }
7990 
7991 void VPWidenSelectRecipe::execute(VPTransformState &State) {
7992   State.ILV->widenSelectInstruction(Ingredient, *this, InvariantCond, State);
7993 }
7994 
7995 void VPWidenRecipe::execute(VPTransformState &State) {
7996   State.ILV->widenInstruction(Ingredient, *this, State);
7997 }
7998 
7999 void VPWidenGEPRecipe::execute(VPTransformState &State) {
8000   State.ILV->widenGEP(GEP, *this, State.UF, State.VF, IsPtrLoopInvariant,
8001                       IsIndexLoopInvariant, State);
8002 }
8003 
8004 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
8005   assert(!State.Instance && "Int or FP induction being replicated.");
8006   State.ILV->widenIntOrFpInduction(IV, Trunc);
8007 }
8008 
8009 void VPWidenPHIRecipe::execute(VPTransformState &State) {
8010   State.ILV->widenPHIInstruction(Phi, State.UF, State.VF);
8011 }
8012 
8013 void VPBlendRecipe::execute(VPTransformState &State) {
8014   State.ILV->setDebugLocFromInst(State.Builder, Phi);
8015   // We know that all PHIs in non-header blocks are converted into
8016   // selects, so we don't have to worry about the insertion order and we
8017   // can just use the builder.
8018   // At this point we generate the predication tree. There may be
8019   // duplications since this is a simple recursive scan, but future
8020   // optimizations will clean it up.
8021 
8022   unsigned NumIncoming = getNumIncomingValues();
8023 
8024   // Generate a sequence of selects of the form:
8025   // SELECT(Mask3, In3,
8026   //        SELECT(Mask2, In2,
8027   //               SELECT(Mask1, In1,
8028   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // are essentially undef and are taken from In0.
8031   InnerLoopVectorizer::VectorParts Entry(State.UF);
8032   for (unsigned In = 0; In < NumIncoming; ++In) {
8033     for (unsigned Part = 0; Part < State.UF; ++Part) {
8034       // We might have single edge PHIs (blocks) - use an identity
8035       // 'select' for the first PHI operand.
8036       Value *In0 = State.get(getIncomingValue(In), Part);
8037       if (In == 0)
8038         Entry[Part] = In0; // Initialize with the first incoming value.
8039       else {
8040         // Select between the current value and the previous incoming edge
8041         // based on the incoming mask.
8042         Value *Cond = State.get(getMask(In), Part);
8043         Entry[Part] =
8044             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
8045       }
8046     }
8047   }
8048   for (unsigned Part = 0; Part < State.UF; ++Part)
8049     State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
8050 }
8051 
8052 void VPInterleaveRecipe::execute(VPTransformState &State) {
8053   assert(!State.Instance && "Interleave group being replicated.");
8054   State.ILV->vectorizeInterleaveGroup(IG, State, getAddr(), getMask());
8055 }
8056 
8057 void VPReductionRecipe::execute(VPTransformState &State) {
8058   assert(!State.Instance && "Reduction being replicated.");
8059   for (unsigned Part = 0; Part < State.UF; ++Part) {
8060     RecurrenceDescriptor::RecurrenceKind Kind = RdxDesc->getRecurrenceKind();
8061     Value *NewVecOp = State.get(VecOp, Part);
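    // If the reduction is predicated, select between the vector operand and a
    // splat of the recurrence identity, so that masked-out lanes do not
    // contribute to the reduced value.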
8062     if (CondOp) {
8063       Value *NewCond = State.get(CondOp, Part);
8064       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
8065       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
8066           Kind, RdxDesc->getMinMaxRecurrenceKind(), VecTy->getElementType());
8067       Constant *IdenVec =
8068           ConstantVector::getSplat(VecTy->getElementCount(), Iden);
8069       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
8070       NewVecOp = Select;
8071     }
8072     Value *NewRed =
8073         createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp, NoNaN);
8074     Value *PrevInChain = State.get(ChainOp, Part);
8075     Value *NextInChain;
8076     if (Kind == RecurrenceDescriptor::RK_IntegerMinMax ||
8077         Kind == RecurrenceDescriptor::RK_FloatMinMax) {
8078       NextInChain =
8079           createMinMaxOp(State.Builder, RdxDesc->getMinMaxRecurrenceKind(),
8080                          NewRed, PrevInChain);
8081     } else {
8082       NextInChain = State.Builder.CreateBinOp(
8083           (Instruction::BinaryOps)I->getOpcode(), NewRed, PrevInChain);
8084     }
8085     State.ValueMap.setVectorValue(I, Part, NextInChain);
8086   }
8087 }
8088 
8089 void VPReplicateRecipe::execute(VPTransformState &State) {
8090   if (State.Instance) { // Generate a single instance.
8091     State.ILV->scalarizeInstruction(Ingredient, *this, *State.Instance,
8092                                     IsPredicated, State);
8093     // Insert scalar instance packing it into a vector.
8094     if (AlsoPack && State.VF.isVector()) {
8095       // If we're constructing lane 0, initialize to start from undef.
8096       if (State.Instance->Lane == 0) {
8097         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
8098         Value *Undef =
8099             UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
8100         State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
8101       }
8102       State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
8103     }
8104     return;
8105   }
8106 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
8110   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
8111   for (unsigned Part = 0; Part < State.UF; ++Part)
8112     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
8113       State.ILV->scalarizeInstruction(Ingredient, *this, {Part, Lane},
8114                                       IsPredicated, State);
8115 }
8116 
8117 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
8118   assert(State.Instance && "Branch on Mask works only on single instance.");
8119 
8120   unsigned Part = State.Instance->Part;
8121   unsigned Lane = State.Instance->Lane;
8122 
8123   Value *ConditionBit = nullptr;
8124   VPValue *BlockInMask = getMask();
8125   if (BlockInMask) {
8126     ConditionBit = State.get(BlockInMask, Part);
8127     if (ConditionBit->getType()->isVectorTy())
8128       ConditionBit = State.Builder.CreateExtractElement(
8129           ConditionBit, State.Builder.getInt32(Lane));
8130   } else // Block in mask is all-one.
8131     ConditionBit = State.Builder.getTrue();
8132 
8133   // Replace the temporary unreachable terminator with a new conditional branch,
8134   // whose two destinations will be set later when they are created.
8135   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
8136   assert(isa<UnreachableInst>(CurrentTerminator) &&
8137          "Expected to replace unreachable terminator with conditional branch.");
8138   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
8139   CondBr->setSuccessor(0, nullptr);
8140   ReplaceInstWithInst(CurrentTerminator, CondBr);
8141 }
8142 
8143 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
8144   assert(State.Instance && "Predicated instruction PHI works per instance.");
8145   Instruction *ScalarPredInst = cast<Instruction>(
8146       State.ValueMap.getScalarValue(PredInst, *State.Instance));
8147   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
8148   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
8149   assert(PredicatingBB && "Predicated block has no single predecessor.");
8150 
8151   // By current pack/unpack logic we need to generate only a single phi node: if
8152   // a vector value for the predicated instruction exists at this point it means
8153   // the instruction has vector users only, and a phi for the vector value is
8154   // needed. In this case the recipe of the predicated instruction is marked to
8155   // also do that packing, thereby "hoisting" the insert-element sequence.
8156   // Otherwise, a phi node for the scalar value is needed.
8157   unsigned Part = State.Instance->Part;
8158   if (State.ValueMap.hasVectorValue(PredInst, Part)) {
8159     Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
8160     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
8161     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
8162     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
8163     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
8164     State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
8165   } else {
8166     Type *PredInstType = PredInst->getType();
8167     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
8168     Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
8169     Phi->addIncoming(ScalarPredInst, PredicatedBB);
8170     State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
8171   }
8172 }
8173 
8174 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
8175   Instruction *Instr = getUnderlyingInstr();
8176   VPValue *StoredValue = isa<StoreInst>(Instr) ? getStoredValue() : nullptr;
8177   State.ILV->vectorizeMemoryInstruction(Instr, State,
8178                                         StoredValue ? nullptr : this, getAddr(),
8179                                         StoredValue, getMask());
8180 }
8181 
// Determine how to lower the scalar epilogue, which depends on 1) optimizing
// for minimum code-size, 2) predication compiler options, 3) loop hints
// forcing predication, and 4) a TTI hook that analyzes whether the loop is
// suitable for predication.
8186 static ScalarEpilogueLowering getScalarEpilogueLowering(
8187     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
8188     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
8189     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
8190     LoopVectorizationLegality &LVL) {
8191   // 1) OptSize takes precedence over all other options, i.e. if this is set,
8192   // don't look at hints or options, and don't request a scalar epilogue.
8193   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
8194   // LoopAccessInfo (due to code dependency and not being able to reliably get
8195   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
8196   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
8197   // versioning when the vectorization is forced, unlike hasOptSize. So revert
8198   // back to the old way and vectorize with versioning when forced. See D81345.)
8199   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
8200                                                       PGSOQueryType::IRPass) &&
8201                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
8202     return CM_ScalarEpilogueNotAllowedOptSize;
8203 
8204   bool PredicateOptDisabled = PreferPredicateOverEpilogue.getNumOccurrences() &&
8205                               !PreferPredicateOverEpilogue;
8206 
8207   // 2) Next, if disabling predication is requested on the command line, honour
8208   // this and request a scalar epilogue.
8209   if (PredicateOptDisabled)
8210     return CM_ScalarEpilogueAllowed;
8211 
  // 3) and 4) Check whether predication is requested on the command line or
  // with a loop hint, or whether the TTI hook indicates it is profitable; if
  // so, request predication.
8215   if (PreferPredicateOverEpilogue ||
8216       Hints.getPredicate() == LoopVectorizeHints::FK_Enabled ||
8217       (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
8218                                         LVL.getLAI()) &&
8219        Hints.getPredicate() != LoopVectorizeHints::FK_Disabled))
8220     return CM_ScalarEpilogueNotNeededUsePredicate;
8221 
8222   return CM_ScalarEpilogueAllowed;
8223 }
8224 
8225 void VPTransformState::set(VPValue *Def, Value *IRDef, Value *V,
8226                            unsigned Part) {
8227   set(Def, V, Part);
8228   ILV->setVectorValue(IRDef, Part, V);
8229 }
8230 
8231 // Process the loop in the VPlan-native vectorization path. This path builds
8232 // VPlan upfront in the vectorization pipeline, which allows to apply
8233 // VPlan-to-VPlan transformations from the very beginning without modifying the
8234 // input LLVM IR.
8235 static bool processLoopInVPlanNativePath(
8236     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
8237     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
8238     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
8239     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
8240     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {
8241 
8242   if (PSE.getBackedgeTakenCount() == PSE.getSE()->getCouldNotCompute()) {
8243     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
8244     return false;
8245   }
8246   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
8247   Function *F = L->getHeader()->getParent();
8248   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
8249 
8250   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
8251       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
8252 
8253   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
8254                                 &Hints, IAI);
8255   // Use the planner for outer loop vectorization.
8256   // TODO: CM is not used at this point inside the planner. Turn CM into an
8257   // optional argument if we don't need it in the future.
8258   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE);
8259 
8260   // Get user vectorization factor.
8261   const unsigned UserVF = Hints.getWidth();
8262 
8263   // Plan how to best vectorize, return the best VF and its cost.
8264   const VectorizationFactor VF =
8265       LVP.planInVPlanNativePath(ElementCount::getFixed(UserVF));
8266 
8267   // If we are stress testing VPlan builds, do not attempt to generate vector
8268   // code. Masked vector code generation support will follow soon.
8269   // Also, do not attempt to vectorize if no vector code will be produced.
8270   if (VPlanBuildStressTest || EnableVPlanPredication ||
8271       VectorizationFactor::Disabled() == VF)
8272     return false;
8273 
8274   LVP.setBestPlan(VF.Width, 1);
8275 
8276   InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
8277                          &CM, BFI, PSI);
8278   LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
8279                     << L->getHeader()->getParent()->getName() << "\"\n");
8280   LVP.executePlan(LB, DT);
8281 
8282   // Mark the loop as already vectorized to avoid vectorizing again.
8283   Hints.setAlreadyVectorized();
8284 
8285   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
8286   return true;
8287 }
8288 
8289 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
8290     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
8291                                !EnableLoopInterleaving),
8292       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
8293                               !EnableLoopVectorization) {}
8294 
8295 bool LoopVectorizePass::processLoop(Loop *L) {
8296   assert((EnableVPlanNativePath || L->isInnermost()) &&
8297          "VPlan-native path is not enabled. Only process inner loops.");
8298 
8299 #ifndef NDEBUG
8300   const std::string DebugLocStr = getDebugLocString(L);
8301 #endif /* NDEBUG */
8302 
8303   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
8304                     << L->getHeader()->getParent()->getName() << "\" from "
8305                     << DebugLocStr << "\n");
8306 
8307   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
8308 
8309   LLVM_DEBUG(
8310       dbgs() << "LV: Loop hints:"
8311              << " force="
8312              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
8313                      ? "disabled"
8314                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
8315                             ? "enabled"
8316                             : "?"))
8317              << " width=" << Hints.getWidth()
8318              << " unroll=" << Hints.getInterleave() << "\n");
8319 
8320   // Function containing loop
8321   Function *F = L->getHeader()->getParent();
8322 
8323   // Looking at the diagnostic output is the only way to determine if a loop
8324   // was vectorized (other than looking at the IR or machine code), so it
8325   // is important to generate an optimization remark for each loop. Most of
8326   // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.
8330 
8331   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
8332     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
8333     return false;
8334   }
8335 
8336   PredicatedScalarEvolution PSE(*SE, *L);
8337 
8338   // Check if it is legal to vectorize the loop.
8339   LoopVectorizationRequirements Requirements(*ORE);
8340   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
8341                                 &Requirements, &Hints, DB, AC, BFI, PSI);
8342   if (!LVL.canVectorize(EnableVPlanNativePath)) {
8343     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
8344     Hints.emitRemarkWithHints();
8345     return false;
8346   }
8347 
8348   // Check the function attributes and profiles to find out if this function
8349   // should be optimized for size.
8350   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
8351       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
8352 
8353   // Entrance to the VPlan-native vectorization path. Outer loops are processed
8354   // here. They may require CFG and instruction level transformations before
8355   // even evaluating whether vectorization is profitable. Since we cannot modify
8356   // the incoming IR, we need to build VPlan upfront in the vectorization
8357   // pipeline.
8358   if (!L->isInnermost())
8359     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
8360                                         ORE, BFI, PSI, Hints);
8361 
8362   assert(L->isInnermost() && "Inner loop expected.");
8363 
8364   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
8365   // count by optimizing for size, to minimize overheads.
8366   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
8367   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
8368     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
8369                       << "This loop is worth vectorizing only if no scalar "
8370                       << "iteration overheads are incurred.");
8371     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
8372       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
8373     else {
8374       LLVM_DEBUG(dbgs() << "\n");
8375       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
8376     }
8377   }
8378 
8379   // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
8383   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
8384     reportVectorizationFailure(
8385         "Can't vectorize when the NoImplicitFloat attribute is used",
8386         "loop not vectorized due to NoImplicitFloat attribute",
8387         "NoImplicitFloat", ORE, L);
8388     Hints.emitRemarkWithHints();
8389     return false;
8390   }
8391 
8392   // Check if the target supports potentially unsafe FP vectorization.
8393   // FIXME: Add a check for the type of safety issue (denormal, signaling)
8394   // for the target we're vectorizing for, to make sure none of the
8395   // additional fp-math flags can help.
8396   if (Hints.isPotentiallyUnsafe() &&
8397       TTI->isFPVectorizationPotentiallyUnsafe()) {
8398     reportVectorizationFailure(
8399         "Potentially unsafe FP op prevents vectorization",
8400         "loop not vectorized due to unsafe FP support.",
8401         "UnsafeFP", ORE, L);
8402     Hints.emitRemarkWithHints();
8403     return false;
8404   }
8405 
8406   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
8407   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
8408 
8409   // If an override option has been passed in for interleaved accesses, use it.
8410   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
8411     UseInterleaved = EnableInterleavedMemAccesses;
8412 
8413   // Analyze interleaved memory accesses.
8414   if (UseInterleaved) {
8415     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
8416   }
8417 
8418   // Use the cost model.
8419   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
8420                                 F, &Hints, IAI);
8421   CM.collectValuesToIgnore();
8422 
8423   // Use the planner for vectorization.
8424   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);
8425 
8426   // Get user vectorization factor and interleave count.
8427   unsigned UserVF = Hints.getWidth();
8428   unsigned UserIC = Hints.getInterleave();
8429 
8430   // Plan how to best vectorize, return the best VF and its cost.
8431   Optional<VectorizationFactor> MaybeVF =
8432       LVP.plan(ElementCount::getFixed(UserVF), UserIC);
8433 
8434   VectorizationFactor VF = VectorizationFactor::Disabled();
8435   unsigned IC = 1;

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                         "requirements.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  if (VF.Width.isScalar()) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up front, despite being
    // explicitly requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs()
        << "LV: Interleaving is beneficial but is explicitly disabled.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
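  // Four cases follow, covering the cross product of the two decisions:
  // neither vectorize nor interleave (emit missed remarks and bail out),
  // interleave only, vectorize only, or both.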
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;
  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not profitable to vectorize the loop (the cost
    // model settled on a scalar VF), then interleave it instead.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, &CM,
                               BFI, PSI);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is both legal and profitable to vectorize the
    // loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM, BFI, PSI);
    LVP.executePlan(LB, DT);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling of the scalar remainder loop
    // when there are no runtime checks for strides and memory. A scalar loop
    // that is rarely executed is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      DisableRuntimeUnroll = true;

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
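  // For example, if the original loop carries
  //   !{!"llvm.loop.vectorize.followup_epilogue", !N}
  // the scalar remainder loop inherits the metadata node !N instead of the
  // default attributes applied below.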
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
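    // (setAlreadyVectorized() records "llvm.loop.isvectorized" in the loop
    // metadata so that subsequent vectorizer runs skip this loop.)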
  }

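  // In builds with assertions enabled, verify that the transformed function
  // is still well-formed IR (verifyFunction returns true if it found errors).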
  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
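  // Simplified (loop-simplify) form guarantees a preheader, a single
  // backedge, and dedicated exit blocks, which the processing below relies on.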

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);
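    // LCSSA form ensures that any value defined inside the loop and used
    // outside of it is routed through a PHI node in an exit block.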

    Changed |= CFGChanged |= processLoop(L);
  }

  // Report whether any loop was modified, and whether the CFG changed.
  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
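    // The nullptr below fills the BlockFrequencyInfo slot of
    // LoopStandardAnalysisResults, which LoopAccessAnalysis does not need.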
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,  SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve the LoopInfo/DominatorTree analyses with
  // outer loop vectorization. Until this is addressed, mark these analyses
  // as preserved only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for the VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}