1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
17 //
18 // This pass has three parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
28 // There is a development effort going on to migrate loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SetVector.h"
73 #include "llvm/ADT/SmallPtrSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/MemorySSA.h"
91 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
92 #include "llvm/Analysis/ProfileSummaryInfo.h"
93 #include "llvm/Analysis/ScalarEvolution.h"
94 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
95 #include "llvm/Analysis/TargetLibraryInfo.h"
96 #include "llvm/Analysis/TargetTransformInfo.h"
97 #include "llvm/Analysis/VectorUtils.h"
98 #include "llvm/IR/Attributes.h"
99 #include "llvm/IR/BasicBlock.h"
100 #include "llvm/IR/CFG.h"
101 #include "llvm/IR/Constant.h"
102 #include "llvm/IR/Constants.h"
103 #include "llvm/IR/DataLayout.h"
104 #include "llvm/IR/DebugInfoMetadata.h"
105 #include "llvm/IR/DebugLoc.h"
106 #include "llvm/IR/DerivedTypes.h"
107 #include "llvm/IR/DiagnosticInfo.h"
108 #include "llvm/IR/Dominators.h"
109 #include "llvm/IR/Function.h"
110 #include "llvm/IR/IRBuilder.h"
111 #include "llvm/IR/InstrTypes.h"
112 #include "llvm/IR/Instruction.h"
113 #include "llvm/IR/Instructions.h"
114 #include "llvm/IR/IntrinsicInst.h"
115 #include "llvm/IR/Intrinsics.h"
116 #include "llvm/IR/LLVMContext.h"
117 #include "llvm/IR/Metadata.h"
118 #include "llvm/IR/Module.h"
119 #include "llvm/IR/Operator.h"
120 #include "llvm/IR/Type.h"
121 #include "llvm/IR/Use.h"
122 #include "llvm/IR/User.h"
123 #include "llvm/IR/Value.h"
124 #include "llvm/IR/ValueHandle.h"
125 #include "llvm/IR/Verifier.h"
126 #include "llvm/InitializePasses.h"
127 #include "llvm/Pass.h"
128 #include "llvm/Support/Casting.h"
129 #include "llvm/Support/CommandLine.h"
130 #include "llvm/Support/Compiler.h"
131 #include "llvm/Support/Debug.h"
132 #include "llvm/Support/ErrorHandling.h"
133 #include "llvm/Support/MathExtras.h"
134 #include "llvm/Support/raw_ostream.h"
135 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
136 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
137 #include "llvm/Transforms/Utils/LoopSimplify.h"
138 #include "llvm/Transforms/Utils/LoopUtils.h"
139 #include "llvm/Transforms/Utils/LoopVersioning.h"
140 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
141 #include "llvm/Transforms/Utils/SizeOpts.h"
142 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
143 #include <algorithm>
144 #include <cassert>
145 #include <cstdint>
146 #include <cstdlib>
147 #include <functional>
148 #include <iterator>
149 #include <limits>
150 #include <memory>
151 #include <string>
152 #include <tuple>
153 #include <utility>
154 
155 using namespace llvm;
156 
157 #define LV_NAME "loop-vectorize"
158 #define DEBUG_TYPE LV_NAME
159 
160 /// @{
161 /// Metadata attribute names
162 static const char *const LLVMLoopVectorizeFollowupAll =
163     "llvm.loop.vectorize.followup_all";
164 static const char *const LLVMLoopVectorizeFollowupVectorized =
165     "llvm.loop.vectorize.followup_vectorized";
166 static const char *const LLVMLoopVectorizeFollowupEpilogue =
167     "llvm.loop.vectorize.followup_epilogue";
168 /// @}
169 
170 STATISTIC(LoopsVectorized, "Number of loops vectorized");
171 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
172 
173 /// Loops with a known constant trip count below this number are vectorized only
174 /// if no scalar iteration overheads are incurred.
175 static cl::opt<unsigned> TinyTripCountVectorThreshold(
176     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
177     cl::desc("Loops with a constant trip count that is smaller than this "
178              "value are vectorized only if no scalar iteration overheads "
179              "are incurred."));
180 
181 // Indicates that an epilogue is undesired, predication is preferred.
182 // This means that the vectorizer will try to fold the loop-tail (epilogue)
183 // into the loop and predicate the loop body accordingly.
184 static cl::opt<bool> PreferPredicateOverEpilog(
185     "prefer-predicate-over-epilog", cl::init(false), cl::Hidden,
186     cl::desc("Indicate that an epilogue is undesired, predication should be "
187              "used instead."));
188 
189 static cl::opt<bool> MaximizeBandwidth(
190     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
191     cl::desc("Maximize bandwidth when selecting vectorization factor which "
192              "will be determined by the smallest type in loop."));
193 
194 static cl::opt<bool> EnableInterleavedMemAccesses(
195     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
196     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
197 
198 /// An interleave-group may need masking if it resides in a block that needs
199 /// predication, or in order to mask away gaps.
200 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
201     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
202     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
203 
204 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
205     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
206     cl::desc("We don't interleave loops with a estimated constant trip count "
207              "below this number"));
208 
209 static cl::opt<unsigned> ForceTargetNumScalarRegs(
210     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
211     cl::desc("A flag that overrides the target's number of scalar registers."));
212 
213 static cl::opt<unsigned> ForceTargetNumVectorRegs(
214     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
215     cl::desc("A flag that overrides the target's number of vector registers."));
216 
217 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
218     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
219     cl::desc("A flag that overrides the target's max interleave factor for "
220              "scalar loops."));
221 
222 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
223     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
224     cl::desc("A flag that overrides the target's max interleave factor for "
225              "vectorized loops."));
226 
227 static cl::opt<unsigned> ForceTargetInstructionCost(
228     "force-target-instruction-cost", cl::init(0), cl::Hidden,
229     cl::desc("A flag that overrides the target's expected cost for "
230              "an instruction to a single constant value. Mostly "
231              "useful for getting consistent testing."));
232 
233 static cl::opt<unsigned> SmallLoopCost(
234     "small-loop-cost", cl::init(20), cl::Hidden,
235     cl::desc(
236         "The cost of a loop that is considered 'small' by the interleaver."));
237 
238 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
239     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
240     cl::desc("Enable the use of the block frequency analysis to access PGO "
241              "heuristics minimizing code growth in cold regions and being more "
242              "aggressive in hot regions."));
243 
244 // Runtime interleave loops for load/store throughput.
245 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
246     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
247     cl::desc(
248         "Enable runtime interleaving until load/store ports are saturated"));
249 
250 /// The number of stores in a loop that are allowed to need predication.
251 static cl::opt<unsigned> NumberOfStoresToPredicate(
252     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
253     cl::desc("Max number of stores to be predicated behind an if."));
254 
255 static cl::opt<bool> EnableIndVarRegisterHeur(
256     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
257     cl::desc("Count the induction variable only once when interleaving"));
258 
259 static cl::opt<bool> EnableCondStoresVectorization(
260     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
261     cl::desc("Enable if predication of stores during vectorization."));
262 
263 static cl::opt<unsigned> MaxNestedScalarReductionIC(
264     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
265     cl::desc("The maximum interleave count to use when interleaving a scalar "
266              "reduction in a nested loop."));
267 
268 cl::opt<bool> EnableVPlanNativePath(
269     "enable-vplan-native-path", cl::init(false), cl::Hidden,
270     cl::desc("Enable VPlan-native vectorization path with "
271              "support for outer loop vectorization."));
272 
273 // FIXME: Remove this switch once we have divergence analysis. Currently we
274 // assume divergent non-backedge branches when this switch is true.
275 cl::opt<bool> EnableVPlanPredication(
276     "enable-vplan-predication", cl::init(false), cl::Hidden,
277     cl::desc("Enable VPlan-native vectorization path predicator with "
278              "support for outer loop vectorization."));
279 
280 // This flag enables the stress testing of the VPlan H-CFG construction in the
281 // VPlan-native vectorization path. It must be used in conjuction with
282 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
283 // verification of the H-CFGs built.
284 static cl::opt<bool> VPlanBuildStressTest(
285     "vplan-build-stress-test", cl::init(false), cl::Hidden,
286     cl::desc(
287         "Build VPlan for every supported loop nest in the function and bail "
288         "out right after the build (stress test the VPlan H-CFG construction "
289         "in the VPlan-native vectorization path)."));
290 
291 cl::opt<bool> llvm::EnableLoopInterleaving(
292     "interleave-loops", cl::init(true), cl::Hidden,
293     cl::desc("Enable loop interleaving in Loop vectorization passes"));
294 cl::opt<bool> llvm::EnableLoopVectorization(
295     "vectorize-loops", cl::init(true), cl::Hidden,
296     cl::desc("Run the Loop vectorization passes"));
297 
298 /// A helper function that returns the type of loaded or stored value.
299 static Type *getMemInstValueType(Value *I) {
300   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
301          "Expected Load or Store instruction");
302   if (auto *LI = dyn_cast<LoadInst>(I))
303     return LI->getType();
304   return cast<StoreInst>(I)->getValueOperand()->getType();
305 }
306 
307 /// A helper function that returns true if the given type is irregular. The
308 /// type is irregular if its allocated size doesn't equal the store size of an
309 /// element of the corresponding vector type at the given vectorization factor.
310 static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
311   // Determine if an array of VF elements of type Ty is "bitcast compatible"
312   // with a <VF x Ty> vector.
313   if (VF > 1) {
314     auto *VectorTy = FixedVectorType::get(Ty, VF);
315     return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
316   }
317 
318   // If the vectorization factor is one, we just check if an array of type Ty
319   // requires padding between elements.
320   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
321 }
322 
323 /// A helper function that returns the reciprocal of the block probability of
324 /// predicated blocks. If we return X, we are assuming the predicated block
325 /// will execute once for every X iterations of the loop header.
326 ///
327 /// TODO: We should use actual block probability here, if available. Currently,
328 ///       we always assume predicated blocks have a 50% chance of executing.
329 static unsigned getReciprocalPredBlockProb() { return 2; }
330 
331 /// A helper function that adds a 'fast' flag to floating-point operations.
332 static Value *addFastMathFlag(Value *V) {
333   if (isa<FPMathOperator>(V))
334     cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
335   return V;
336 }
337 
338 static Value *addFastMathFlag(Value *V, FastMathFlags FMF) {
339   if (isa<FPMathOperator>(V))
340     cast<Instruction>(V)->setFastMathFlags(FMF);
341   return V;
342 }
343 
344 /// A helper function that returns an integer or floating-point constant with
345 /// value C.
346 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
347   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
348                            : ConstantFP::get(Ty, C);
349 }
350 
351 /// Returns "best known" trip count for the specified loop \p L as defined by
352 /// the following procedure:
353 ///   1) Returns exact trip count if it is known.
354 ///   2) Returns expected trip count according to profile data if any.
355 ///   3) Returns upper bound estimate if it is known.
356 ///   4) Returns None if all of the above failed.
357 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
358   // Check if exact trip count is known.
359   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
360     return ExpectedTC;
361 
362   // Check if there is an expected trip count available from profile data.
363   if (LoopVectorizeWithBlockFrequency)
364     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
365       return EstimatedTC;
366 
367   // Check if upper bound estimate is known.
368   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
369     return ExpectedTC;
370 
371   return None;
372 }
373 
374 namespace llvm {
375 
376 /// InnerLoopVectorizer vectorizes loops which contain only one basic
377 /// block to a specified vectorization factor (VF).
378 /// This class performs the widening of scalars into vectors, or multiple
379 /// scalars. This class also implements the following features:
380 /// * It inserts an epilogue loop for handling loops that don't have iteration
381 ///   counts that are known to be a multiple of the vectorization factor.
382 /// * It handles the code generation for reduction variables.
383 /// * Scalarization (implementation using scalars) of un-vectorizable
384 ///   instructions.
385 /// InnerLoopVectorizer does not perform any vectorization-legality
386 /// checks, and relies on the caller to check for the different legality
387 /// aspects. The InnerLoopVectorizer relies on the
388 /// LoopVectorizationLegality class to provide information about the induction
389 /// and reduction variables that were found to a given vectorization factor.
390 class InnerLoopVectorizer {
391 public:
392   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
393                       LoopInfo *LI, DominatorTree *DT,
394                       const TargetLibraryInfo *TLI,
395                       const TargetTransformInfo *TTI, AssumptionCache *AC,
396                       OptimizationRemarkEmitter *ORE, unsigned VecWidth,
397                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
398                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
399                       ProfileSummaryInfo *PSI)
400       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
401         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
402         Builder(PSE.getSE()->getContext()),
403         VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM),
404         BFI(BFI), PSI(PSI) {
405     // Query this against the original loop and save it here because the profile
406     // of the original loop header may change as the transformation happens.
407     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
408         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
409   }
410 
411   virtual ~InnerLoopVectorizer() = default;
412 
413   /// Create a new empty loop. Unlink the old loop and connect the new one.
414   /// Return the pre-header block of the new loop.
415   BasicBlock *createVectorizedLoopSkeleton();
416 
417   /// Widen a single instruction within the innermost loop.
418   void widenInstruction(Instruction &I, VPUser &Operands,
419                         VPTransformState &State);
420 
421   /// Widen a single call instruction within the innermost loop.
422   void widenCallInstruction(CallInst &I, VPUser &ArgOperands,
423                             VPTransformState &State);
424 
425   /// Widen a single select instruction within the innermost loop.
426   void widenSelectInstruction(SelectInst &I, VPUser &Operands,
427                               bool InvariantCond, VPTransformState &State);
428 
429   /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
430   void fixVectorizedLoop();
431 
432   // Return true if any runtime check is added.
433   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
434 
435   /// A type for vectorized values in the new loop. Each value from the
436   /// original loop, when vectorized, is represented by UF vector values in the
437   /// new unrolled loop, where UF is the unroll factor.
438   using VectorParts = SmallVector<Value *, 2>;
439 
440   /// Vectorize a single GetElementPtrInst based on information gathered and
441   /// decisions taken during planning.
442   void widenGEP(GetElementPtrInst *GEP, VPUser &Indices, unsigned UF,
443                 unsigned VF, bool IsPtrLoopInvariant,
444                 SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);
445 
446   /// Vectorize a single PHINode in a block. This method handles the induction
447   /// variable canonicalization. It supports both VF = 1 for unrolled loops and
448   /// arbitrary length vectors.
449   void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);
450 
451   /// A helper function to scalarize a single Instruction in the innermost loop.
452   /// Generates a sequence of scalar instances for each lane between \p MinLane
453   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
454   /// inclusive. Uses the VPValue operands from \p Operands instead of \p
455   /// Instr's operands.
456   void scalarizeInstruction(Instruction *Instr, VPUser &Operands,
457                             const VPIteration &Instance, bool IfPredicateInstr,
458                             VPTransformState &State);
459 
460   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
461   /// is provided, the integer induction variable will first be truncated to
462   /// the corresponding type.
463   void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);
464 
465   /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
466   /// vector or scalar value on-demand if one is not yet available. When
467   /// vectorizing a loop, we visit the definition of an instruction before its
468   /// uses. When visiting the definition, we either vectorize or scalarize the
469   /// instruction, creating an entry for it in the corresponding map. (In some
470   /// cases, such as induction variables, we will create both vector and scalar
471   /// entries.) Then, as we encounter uses of the definition, we derive values
472   /// for each scalar or vector use unless such a value is already available.
473   /// For example, if we scalarize a definition and one of its uses is vector,
474   /// we build the required vector on-demand with an insertelement sequence
475   /// when visiting the use. Otherwise, if the use is scalar, we can use the
476   /// existing scalar definition.
477   ///
478   /// Return a value in the new loop corresponding to \p V from the original
479   /// loop at unroll index \p Part. If the value has already been vectorized,
480   /// the corresponding vector entry in VectorLoopValueMap is returned. If,
481   /// however, the value has a scalar entry in VectorLoopValueMap, we construct
482   /// a new vector value on-demand by inserting the scalar values into a vector
483   /// with an insertelement sequence. If the value has been neither vectorized
484   /// nor scalarized, it must be loop invariant, so we simply broadcast the
485   /// value into a vector.
486   Value *getOrCreateVectorValue(Value *V, unsigned Part);
487 
488   /// Return a value in the new loop corresponding to \p V from the original
489   /// loop at unroll and vector indices \p Instance. If the value has been
490   /// vectorized but not scalarized, the necessary extractelement instruction
491   /// will be generated.
492   Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);
493 
494   /// Construct the vector value of a scalarized value \p V one lane at a time.
495   void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);
496 
497   /// Try to vectorize interleaved access group \p Group with the base address
498   /// given in \p Addr, optionally masking the vector operations if \p
499   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
500   /// values in the vectorized loop.
501   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
502                                 VPTransformState &State, VPValue *Addr,
503                                 VPValue *BlockInMask = nullptr);
504 
505   /// Vectorize Load and Store instructions with the base address given in \p
506   /// Addr, optionally masking the vector operations if \p BlockInMask is
507   /// non-null. Use \p State to translate given VPValues to IR values in the
508   /// vectorized loop.
509   void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
510                                   VPValue *Addr, VPValue *StoredValue,
511                                   VPValue *BlockInMask);
512 
513   /// Set the debug location in the builder using the debug location in
514   /// the instruction.
515   void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
516 
517   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
518   void fixNonInductionPHIs(void);
519 
520 protected:
521   friend class LoopVectorizationPlanner;
522 
523   /// A small list of PHINodes.
524   using PhiVector = SmallVector<PHINode *, 4>;
525 
526   /// A type for scalarized values in the new loop. Each value from the
527   /// original loop, when scalarized, is represented by UF x VF scalar values
528   /// in the new unrolled loop, where UF is the unroll factor and VF is the
529   /// vectorization factor.
530   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
531 
532   /// Set up the values of the IVs correctly when exiting the vector loop.
533   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
534                     Value *CountRoundDown, Value *EndValue,
535                     BasicBlock *MiddleBlock);
536 
537   /// Create a new induction variable inside L.
538   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
539                                    Value *Step, Instruction *DL);
540 
541   /// Handle all cross-iteration phis in the header.
542   void fixCrossIterationPHIs();
543 
544   /// Fix a first-order recurrence. This is the second phase of vectorizing
545   /// this phi node.
546   void fixFirstOrderRecurrence(PHINode *Phi);
547 
548   /// Fix a reduction cross-iteration phi. This is the second phase of
549   /// vectorizing this phi node.
550   void fixReduction(PHINode *Phi);
551 
552   /// Clear NSW/NUW flags from reduction instructions if necessary.
553   void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);
554 
555   /// The Loop exit block may have single value PHI nodes with some
556   /// incoming value. While vectorizing we only handled real values
557   /// that were defined inside the loop and we should have one value for
558   /// each predecessor of its parent basic block. See PR14725.
559   void fixLCSSAPHIs();
560 
561   /// Iteratively sink the scalarized operands of a predicated instruction into
562   /// the block that was created for it.
563   void sinkScalarOperands(Instruction *PredInst);
564 
565   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
566   /// represented as.
567   void truncateToMinimalBitwidths();
568 
569   /// Create a broadcast instruction. This method generates a broadcast
570   /// instruction (shuffle) for loop invariant values and for the induction
571   /// value. If this is the induction variable then we extend it to N, N+1, ...
572   /// this is needed because each iteration in the loop corresponds to a SIMD
573   /// element.
574   virtual Value *getBroadcastInstrs(Value *V);
575 
576   /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
577   /// to each vector element of Val. The sequence starts at StartIndex.
578   /// \p Opcode is relevant for FP induction variable.
579   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
580                                Instruction::BinaryOps Opcode =
581                                Instruction::BinaryOpsEnd);
582 
583   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
584   /// variable on which to base the steps, \p Step is the size of the step, and
585   /// \p EntryVal is the value from the original loop that maps to the steps.
586   /// Note that \p EntryVal doesn't have to be an induction variable - it
587   /// can also be a truncate instruction.
588   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
589                         const InductionDescriptor &ID);
590 
591   /// Create a vector induction phi node based on an existing scalar one. \p
592   /// EntryVal is the value from the original loop that maps to the vector phi
593   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
594   /// truncate instruction, instead of widening the original IV, we widen a
595   /// version of the IV truncated to \p EntryVal's type.
596   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
597                                        Value *Step, Instruction *EntryVal);
598 
599   /// Returns true if an instruction \p I should be scalarized instead of
600   /// vectorized for the chosen vectorization factor.
601   bool shouldScalarizeInstruction(Instruction *I) const;
602 
603   /// Returns true if we should generate a scalar version of \p IV.
604   bool needsScalarInduction(Instruction *IV) const;
605 
606   /// If there is a cast involved in the induction variable \p ID, which should
607   /// be ignored in the vectorized loop body, this function records the
608   /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
609   /// cast. We had already proved that the casted Phi is equal to the uncasted
610   /// Phi in the vectorized loop (under a runtime guard), and therefore
611   /// there is no need to vectorize the cast - the same value can be used in the
612   /// vector loop for both the Phi and the cast.
613   /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified,
614   /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
615   ///
616   /// \p EntryVal is the value from the original loop that maps to the vector
617   /// phi node and is used to distinguish what is the IV currently being
618   /// processed - original one (if \p EntryVal is a phi corresponding to the
619   /// original IV) or the "newly-created" one based on the proof mentioned above
620   /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the
621   /// latter case \p EntryVal is a TruncInst and we must not record anything for
622   /// that IV, but it's error-prone to expect callers of this routine to care
623   /// about that, hence this explicit parameter.
624   void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
625                                              const Instruction *EntryVal,
626                                              Value *VectorLoopValue,
627                                              unsigned Part,
628                                              unsigned Lane = UINT_MAX);
629 
630   /// Generate a shuffle sequence that will reverse the vector Vec.
631   virtual Value *reverseVector(Value *Vec);
632 
633   /// Returns (and creates if needed) the original loop trip count.
634   Value *getOrCreateTripCount(Loop *NewLoop);
635 
636   /// Returns (and creates if needed) the trip count of the widened loop.
637   Value *getOrCreateVectorTripCount(Loop *NewLoop);
638 
639   /// Returns a bitcasted value to the requested vector type.
640   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
641   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
642                                 const DataLayout &DL);
643 
644   /// Emit a bypass check to see if the vector trip count is zero, including if
645   /// it overflows.
646   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
647 
648   /// Emit a bypass check to see if all of the SCEV assumptions we've
649   /// had to make are correct.
650   void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
651 
652   /// Emit bypass checks to check any memory assumptions we may have made.
653   void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
654 
655   /// Compute the transformed value of Index at offset StartValue using step
656   /// StepValue.
657   /// For integer induction, returns StartValue + Index * StepValue.
658   /// For pointer induction, returns StartValue[Index * StepValue].
659   /// FIXME: The newly created binary instructions should contain nsw/nuw
660   /// flags, which can be found from the original scalar operations.
661   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
662                               const DataLayout &DL,
663                               const InductionDescriptor &ID) const;
664 
665   /// Add additional metadata to \p To that was not present on \p Orig.
666   ///
667   /// Currently this is used to add the noalias annotations based on the
668   /// inserted memchecks.  Use this for instructions that are *cloned* into the
669   /// vector loop.
670   void addNewMetadata(Instruction *To, const Instruction *Orig);
671 
672   /// Add metadata from one instruction to another.
673   ///
674   /// This includes both the original MDs from \p From and additional ones (\see
675   /// addNewMetadata).  Use this for *newly created* instructions in the vector
676   /// loop.
677   void addMetadata(Instruction *To, Instruction *From);
678 
679   /// Similar to the previous function but it adds the metadata to a
680   /// vector of instructions.
681   void addMetadata(ArrayRef<Value *> To, Instruction *From);
682 
683   /// The original loop.
684   Loop *OrigLoop;
685 
686   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
687   /// dynamic knowledge to simplify SCEV expressions and converts them to a
688   /// more usable form.
689   PredicatedScalarEvolution &PSE;
690 
691   /// Loop Info.
692   LoopInfo *LI;
693 
694   /// Dominator Tree.
695   DominatorTree *DT;
696 
697   /// Alias Analysis.
698   AAResults *AA;
699 
700   /// Target Library Info.
701   const TargetLibraryInfo *TLI;
702 
703   /// Target Transform Info.
704   const TargetTransformInfo *TTI;
705 
706   /// Assumption Cache.
707   AssumptionCache *AC;
708 
709   /// Interface to emit optimization remarks.
710   OptimizationRemarkEmitter *ORE;
711 
712   /// LoopVersioning.  It's only set up (non-null) if memchecks were
713   /// used.
714   ///
715   /// This is currently only used to add no-alias metadata based on the
716   /// memchecks.  The actually versioning is performed manually.
717   std::unique_ptr<LoopVersioning> LVer;
718 
719   /// The vectorization SIMD factor to use. Each vector will have this many
720   /// vector elements.
721   unsigned VF;
722 
723   /// The vectorization unroll factor to use. Each scalar is vectorized to this
724   /// many different vector instructions.
725   unsigned UF;
726 
727   /// The builder that we use
728   IRBuilder<> Builder;
729 
730   // --- Vectorization state ---
731 
732   /// The vector-loop preheader.
733   BasicBlock *LoopVectorPreHeader;
734 
735   /// The scalar-loop preheader.
736   BasicBlock *LoopScalarPreHeader;
737 
738   /// Middle Block between the vector and the scalar.
739   BasicBlock *LoopMiddleBlock;
740 
741   /// The ExitBlock of the scalar loop.
742   BasicBlock *LoopExitBlock;
743 
744   /// The vector loop body.
745   BasicBlock *LoopVectorBody;
746 
747   /// The scalar loop body.
748   BasicBlock *LoopScalarBody;
749 
750   /// A list of all bypass blocks. The first block is the entry of the loop.
751   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
752 
753   /// The new Induction variable which was added to the new block.
754   PHINode *Induction = nullptr;
755 
756   /// The induction variable of the old basic block.
757   PHINode *OldInduction = nullptr;
758 
759   /// Maps values from the original loop to their corresponding values in the
760   /// vectorized loop. A key value can map to either vector values, scalar
761   /// values or both kinds of values, depending on whether the key was
762   /// vectorized and scalarized.
763   VectorizerValueMap VectorLoopValueMap;
764 
765   /// Store instructions that were predicated.
766   SmallVector<Instruction *, 4> PredicatedInstructions;
767 
768   /// Trip count of the original loop.
769   Value *TripCount = nullptr;
770 
771   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
772   Value *VectorTripCount = nullptr;
773 
774   /// The legality analysis.
775   LoopVectorizationLegality *Legal;
776 
777   /// The profitablity analysis.
778   LoopVectorizationCostModel *Cost;
779 
780   // Record whether runtime checks are added.
781   bool AddedSafetyChecks = false;
782 
783   // Holds the end values for each induction variable. We save the end values
784   // so we can later fix-up the external users of the induction variables.
785   DenseMap<PHINode *, Value *> IVEndValues;
786 
787   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
788   // fixed up at the end of vector code generation.
789   SmallVector<PHINode *, 8> OrigPHIsToFix;
790 
791   /// BFI and PSI are used to check for profile guided size optimizations.
792   BlockFrequencyInfo *BFI;
793   ProfileSummaryInfo *PSI;
794 
795   // Whether this loop should be optimized for size based on profile guided size
796   // optimizatios.
797   bool OptForSizeBasedOnProfile;
798 };
799 
800 class InnerLoopUnroller : public InnerLoopVectorizer {
801 public:
802   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
803                     LoopInfo *LI, DominatorTree *DT,
804                     const TargetLibraryInfo *TLI,
805                     const TargetTransformInfo *TTI, AssumptionCache *AC,
806                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
807                     LoopVectorizationLegality *LVL,
808                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
809                     ProfileSummaryInfo *PSI)
810       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
811                             UnrollFactor, LVL, CM, BFI, PSI) {}
812 
813 private:
814   Value *getBroadcastInstrs(Value *V) override;
815   Value *getStepVector(Value *Val, int StartIdx, Value *Step,
816                        Instruction::BinaryOps Opcode =
817                        Instruction::BinaryOpsEnd) override;
818   Value *reverseVector(Value *Vec) override;
819 };
820 
821 } // end namespace llvm
822 
823 /// Look for a meaningful debug location on the instruction or it's
824 /// operands.
825 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
826   if (!I)
827     return I;
828 
829   DebugLoc Empty;
830   if (I->getDebugLoc() != Empty)
831     return I;
832 
833   for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
834     if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
835       if (OpInst->getDebugLoc() != Empty)
836         return OpInst;
837   }
838 
839   return I;
840 }
841 
842 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
843   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
844     const DILocation *DIL = Inst->getDebugLoc();
845     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
846         !isa<DbgInfoIntrinsic>(Inst)) {
847       auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(UF * VF);
848       if (NewDIL)
849         B.SetCurrentDebugLocation(NewDIL.getValue());
850       else
851         LLVM_DEBUG(dbgs()
852                    << "Failed to create new discriminator: "
853                    << DIL->getFilename() << " Line: " << DIL->getLine());
854     }
855     else
856       B.SetCurrentDebugLocation(DIL);
857   } else
858     B.SetCurrentDebugLocation(DebugLoc());
859 }
860 
861 /// Write a record \p DebugMsg about vectorization failure to the debug
862 /// output stream. If \p I is passed, it is an instruction that prevents
863 /// vectorization.
864 #ifndef NDEBUG
865 static void debugVectorizationFailure(const StringRef DebugMsg,
866     Instruction *I) {
867   dbgs() << "LV: Not vectorizing: " << DebugMsg;
868   if (I != nullptr)
869     dbgs() << " " << *I;
870   else
871     dbgs() << '.';
872   dbgs() << '\n';
873 }
874 #endif
875 
876 /// Create an analysis remark that explains why vectorization failed
877 ///
878 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
879 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
880 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
881 /// the location of the remark.  \return the remark object that can be
882 /// streamed to.
883 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
884     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
885   Value *CodeRegion = TheLoop->getHeader();
886   DebugLoc DL = TheLoop->getStartLoc();
887 
888   if (I) {
889     CodeRegion = I->getParent();
890     // If there is no debug location attached to the instruction, revert back to
891     // using the loop's.
892     if (I->getDebugLoc())
893       DL = I->getDebugLoc();
894   }
895 
896   OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
897   R << "loop not vectorized: ";
898   return R;
899 }
900 
901 namespace llvm {
902 
903 void reportVectorizationFailure(const StringRef DebugMsg,
904     const StringRef OREMsg, const StringRef ORETag,
905     OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) {
906   LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
907   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
908   ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
909                 ORETag, TheLoop, I) << OREMsg);
910 }
911 
912 } // end namespace llvm
913 
914 #ifndef NDEBUG
915 /// \return string containing a file name and a line # for the given loop.
916 static std::string getDebugLocString(const Loop *L) {
917   std::string Result;
918   if (L) {
919     raw_string_ostream OS(Result);
920     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
921       LoopDbgLoc.print(OS);
922     else
923       // Just print the module name.
924       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
925     OS.flush();
926   }
927   return Result;
928 }
929 #endif
930 
931 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
932                                          const Instruction *Orig) {
933   // If the loop was versioned with memchecks, add the corresponding no-alias
934   // metadata.
935   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
936     LVer->annotateInstWithNoAlias(To, Orig);
937 }
938 
939 void InnerLoopVectorizer::addMetadata(Instruction *To,
940                                       Instruction *From) {
941   propagateMetadata(To, From);
942   addNewMetadata(To, From);
943 }
944 
945 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
946                                       Instruction *From) {
947   for (Value *V : To) {
948     if (Instruction *I = dyn_cast<Instruction>(V))
949       addMetadata(I, From);
950   }
951 }
952 
953 namespace llvm {
954 
955 // Loop vectorization cost-model hints how the scalar epilogue loop should be
956 // lowered.
957 enum ScalarEpilogueLowering {
958 
959   // The default: allowing scalar epilogues.
960   CM_ScalarEpilogueAllowed,
961 
962   // Vectorization with OptForSize: don't allow epilogues.
963   CM_ScalarEpilogueNotAllowedOptSize,
964 
965   // A special case of vectorisation with OptForSize: loops with a very small
966   // trip count are considered for vectorization under OptForSize, thereby
967   // making sure the cost of their loop body is dominant, free of runtime
968   // guards and scalar iteration overheads.
969   CM_ScalarEpilogueNotAllowedLowTripLoop,
970 
971   // Loop hint predicate indicating an epilogue is undesired.
972   CM_ScalarEpilogueNotNeededUsePredicate
973 };
974 
975 /// LoopVectorizationCostModel - estimates the expected speedups due to
976 /// vectorization.
977 /// In many cases vectorization is not profitable. This can happen because of
978 /// a number of reasons. In this class we mainly attempt to predict the
979 /// expected speedup/slowdowns due to the supported instruction set. We use the
980 /// TargetTransformInfo to query the different backends for the cost of
981 /// different operations.
982 class LoopVectorizationCostModel {
983 public:
984   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
985                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
986                              LoopVectorizationLegality *Legal,
987                              const TargetTransformInfo &TTI,
988                              const TargetLibraryInfo *TLI, DemandedBits *DB,
989                              AssumptionCache *AC,
990                              OptimizationRemarkEmitter *ORE, const Function *F,
991                              const LoopVectorizeHints *Hints,
992                              InterleavedAccessInfo &IAI)
993       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
994         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
995         Hints(Hints), InterleaveInfo(IAI) {}
996 
997   /// \return An upper bound for the vectorization factor, or None if
998   /// vectorization and interleaving should be avoided up front.
999   Optional<unsigned> computeMaxVF(unsigned UserVF, unsigned UserIC);
1000 
1001   /// \return True if runtime checks are required for vectorization, and false
1002   /// otherwise.
1003   bool runtimeChecksRequired();
1004 
1005   /// \return The most profitable vectorization factor and the cost of that VF.
1006   /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
1007   /// then this vectorization factor will be selected if vectorization is
1008   /// possible.
1009   VectorizationFactor selectVectorizationFactor(unsigned MaxVF);
1010 
1011   /// Setup cost-based decisions for user vectorization factor.
1012   void selectUserVectorizationFactor(unsigned UserVF) {
1013     collectUniformsAndScalars(UserVF);
1014     collectInstsToScalarize(UserVF);
1015   }
1016 
1017   /// \return The size (in bits) of the smallest and widest types in the code
1018   /// that needs to be vectorized. We ignore values that remain scalar such as
1019   /// 64 bit loop indices.
1020   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1021 
1022   /// \return The desired interleave count.
1023   /// If interleave count has been specified by metadata it will be returned.
1024   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1025   /// are the selected vectorization factor and the cost of the selected VF.
1026   unsigned selectInterleaveCount(unsigned VF, unsigned LoopCost);
1027 
1028   /// Memory access instruction may be vectorized in more than one way.
1029   /// Form of instruction after vectorization depends on cost.
1030   /// This function takes cost-based decisions for Load/Store instructions
1031   /// and collects them in a map. This decisions map is used for building
1032   /// the lists of loop-uniform and loop-scalar instructions.
1033   /// The calculated cost is saved with widening decision in order to
1034   /// avoid redundant calculations.
1035   void setCostBasedWideningDecision(unsigned VF);
1036 
1037   /// A struct that represents some properties of the register usage
1038   /// of a loop.
1039   struct RegisterUsage {
1040     /// Holds the number of loop invariant values that are used in the loop.
1041     /// The key is ClassID of target-provided register class.
1042     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1043     /// Holds the maximum number of concurrent live intervals in the loop.
1044     /// The key is ClassID of target-provided register class.
1045     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1046   };
1047 
1048   /// \return Returns information about the register usages of the loop for the
1049   /// given vectorization factors.
1050   SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1051 
1052   /// Collect values we want to ignore in the cost model.
1053   void collectValuesToIgnore();
1054 
1055   /// \returns The smallest bitwidth each instruction can be represented with.
1056   /// The vector equivalents of these instructions should be truncated to this
1057   /// type.
1058   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1059     return MinBWs;
1060   }
1061 
1062   /// \returns True if it is more profitable to scalarize instruction \p I for
1063   /// vectorization factor \p VF.
1064   bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
1065     assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");
1066 
1067     // Cost model is not run in the VPlan-native path - return conservative
1068     // result until this changes.
1069     if (EnableVPlanNativePath)
1070       return false;
1071 
1072     auto Scalars = InstsToScalarize.find(VF);
1073     assert(Scalars != InstsToScalarize.end() &&
1074            "VF not yet analyzed for scalarization profitability");
1075     return Scalars->second.find(I) != Scalars->second.end();
1076   }
1077 
1078   /// Returns true if \p I is known to be uniform after vectorization.
1079   bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
1080     if (VF == 1)
1081       return true;
1082 
1083     // Cost model is not run in the VPlan-native path - return conservative
1084     // result until this changes.
1085     if (EnableVPlanNativePath)
1086       return false;
1087 
1088     auto UniformsPerVF = Uniforms.find(VF);
1089     assert(UniformsPerVF != Uniforms.end() &&
1090            "VF not yet analyzed for uniformity");
1091     return UniformsPerVF->second.count(I);
1092   }
1093 
1094   /// Returns true if \p I is known to be scalar after vectorization.
1095   bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
1096     if (VF == 1)
1097       return true;
1098 
1099     // Cost model is not run in the VPlan-native path - return conservative
1100     // result until this changes.
1101     if (EnableVPlanNativePath)
1102       return false;
1103 
1104     auto ScalarsPerVF = Scalars.find(VF);
1105     assert(ScalarsPerVF != Scalars.end() &&
1106            "Scalar values are not calculated for VF");
1107     return ScalarsPerVF->second.count(I);
1108   }
1109 
1110   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1111   /// for vectorization factor \p VF.
1112   bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
1113     return VF > 1 && MinBWs.find(I) != MinBWs.end() &&
1114            !isProfitableToScalarize(I, VF) &&
1115            !isScalarAfterVectorization(I, VF);
1116   }
1117 
1118   /// Decision that was taken during cost calculation for memory instruction.
1119   enum InstWidening {
1120     CM_Unknown,
1121     CM_Widen,         // For consecutive accesses with stride +1.
1122     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1123     CM_Interleave,
1124     CM_GatherScatter,
1125     CM_Scalarize
1126   };
1127 
1128   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1129   /// instruction \p I and vector width \p VF.
1130   void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
1131                            unsigned Cost) {
1132     assert(VF >= 2 && "Expected VF >=2");
1133     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1134   }
1135 
1136   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1137   /// interleaving group \p Grp and vector width \p VF.
1138   void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF,
1139                            InstWidening W, unsigned Cost) {
1140     assert(VF >= 2 && "Expected VF >=2");
1141     /// Broadcast this decicion to all instructions inside the group.
1142     /// But the cost will be assigned to one instruction only.
1143     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1144       if (auto *I = Grp->getMember(i)) {
1145         if (Grp->getInsertPos() == I)
1146           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1147         else
1148           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1149       }
1150     }
1151   }
1152 
1153   /// Return the cost model decision for the given instruction \p I and vector
1154   /// width \p VF. Return CM_Unknown if this instruction did not pass
1155   /// through the cost modeling.
1156   InstWidening getWideningDecision(Instruction *I, unsigned VF) {
1157     assert(VF >= 2 && "Expected VF >=2");
1158 
1159     // Cost model is not run in the VPlan-native path - return conservative
1160     // result until this changes.
1161     if (EnableVPlanNativePath)
1162       return CM_GatherScatter;
1163 
1164     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1165     auto Itr = WideningDecisions.find(InstOnVF);
1166     if (Itr == WideningDecisions.end())
1167       return CM_Unknown;
1168     return Itr->second.first;
1169   }
1170 
1171   /// Return the vectorization cost for the given instruction \p I and vector
1172   /// width \p VF.
1173   unsigned getWideningCost(Instruction *I, unsigned VF) {
1174     assert(VF >= 2 && "Expected VF >=2");
1175     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1176     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1177            "The cost is not calculated");
1178     return WideningDecisions[InstOnVF].second;
1179   }
1180 
1181   /// Return True if instruction \p I is an optimizable truncate whose operand
1182   /// is an induction variable. Such a truncate will be removed by adding a new
1183   /// induction variable with the destination type.
1184   bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
1185     // If the instruction is not a truncate, return false.
1186     auto *Trunc = dyn_cast<TruncInst>(I);
1187     if (!Trunc)
1188       return false;
1189 
1190     // Get the source and destination types of the truncate.
1191     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1192     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1193 
1194     // If the truncate is free for the given types, return false. Replacing a
1195     // free truncate with an induction variable would add an induction variable
1196     // update instruction to each iteration of the loop. We exclude from this
1197     // check the primary induction variable since it will need an update
1198     // instruction regardless.
1199     Value *Op = Trunc->getOperand(0);
1200     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1201       return false;
1202 
1203     // If the truncated value is not an induction variable, return false.
1204     return Legal->isInductionPhi(Op);
1205   }
1206 
1207   /// Collects the instructions to scalarize for each predicated instruction in
1208   /// the loop.
1209   void collectInstsToScalarize(unsigned VF);
1210 
1211   /// Collect Uniform and Scalar values for the given \p VF.
1212   /// The sets depend on CM decision for Load/Store instructions
1213   /// that may be vectorized as interleave, gather-scatter or scalarized.
1214   void collectUniformsAndScalars(unsigned VF) {
1215     // Do the analysis once.
1216     if (VF == 1 || Uniforms.find(VF) != Uniforms.end())
1217       return;
1218     setCostBasedWideningDecision(VF);
1219     collectLoopUniforms(VF);
1220     collectLoopScalars(VF);
1221   }
1222 
1223   /// Returns true if the target machine supports masked store operation
1224   /// for the given \p DataType and kind of access to \p Ptr.
1225   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) {
1226     return Legal->isConsecutivePtr(Ptr) &&
1227            TTI.isLegalMaskedStore(DataType, Alignment);
1228   }
1229 
1230   /// Returns true if the target machine supports masked load operation
1231   /// for the given \p DataType and kind of access to \p Ptr.
1232   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) {
1233     return Legal->isConsecutivePtr(Ptr) &&
1234            TTI.isLegalMaskedLoad(DataType, Alignment);
1235   }
1236 
1237   /// Returns true if the target machine supports masked scatter operation
1238   /// for the given \p DataType.
1239   bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
1240     return TTI.isLegalMaskedScatter(DataType, Alignment);
1241   }
1242 
1243   /// Returns true if the target machine supports masked gather operation
1244   /// for the given \p DataType.
1245   bool isLegalMaskedGather(Type *DataType, Align Alignment) {
1246     return TTI.isLegalMaskedGather(DataType, Alignment);
1247   }
1248 
1249   /// Returns true if the target machine can represent \p V as a masked gather
1250   /// or scatter operation.
1251   bool isLegalGatherOrScatter(Value *V) {
1252     bool LI = isa<LoadInst>(V);
1253     bool SI = isa<StoreInst>(V);
1254     if (!LI && !SI)
1255       return false;
1256     auto *Ty = getMemInstValueType(V);
1257     Align Align = getLoadStoreAlignment(V);
1258     return (LI && isLegalMaskedGather(Ty, Align)) ||
1259            (SI && isLegalMaskedScatter(Ty, Align));
1260   }
1261 
1262   /// Returns true if \p I is an instruction that will be scalarized with
1263   /// predication. Such instructions include conditional stores and
1264   /// instructions that may divide by zero.
1265   /// If a non-zero VF has been calculated, we check if I will be scalarized
1266   /// predication for that VF.
1267   bool isScalarWithPredication(Instruction *I, unsigned VF = 1);
1268 
1269   // Returns true if \p I is an instruction that will be predicated either
1270   // through scalar predication or masked load/store or masked gather/scatter.
1271   // Superset of instructions that return true for isScalarWithPredication.
1272   bool isPredicatedInst(Instruction *I) {
1273     if (!blockNeedsPredication(I->getParent()))
1274       return false;
1275     // Loads and stores that need some form of masked operation are predicated
1276     // instructions.
1277     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1278       return Legal->isMaskRequired(I);
1279     return isScalarWithPredication(I);
1280   }
1281 
1282   /// Returns true if \p I is a memory instruction with consecutive memory
1283   /// access that can be widened.
1284   bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);
1285 
1286   /// Returns true if \p I is a memory instruction in an interleaved-group
1287   /// of memory accesses that can be vectorized with wide vector loads/stores
1288   /// and shuffles.
1289   bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1);
1290 
1291   /// Check if \p Instr belongs to any interleaved access group.
1292   bool isAccessInterleaved(Instruction *Instr) {
1293     return InterleaveInfo.isInterleaved(Instr);
1294   }
1295 
1296   /// Get the interleaved access group that \p Instr belongs to.
1297   const InterleaveGroup<Instruction> *
1298   getInterleavedAccessGroup(Instruction *Instr) {
1299     return InterleaveInfo.getInterleaveGroup(Instr);
1300   }
1301 
1302   /// Returns true if an interleaved group requires a scalar iteration
1303   /// to handle accesses with gaps, and there is nothing preventing us from
1304   /// creating a scalar epilogue.
1305   bool requiresScalarEpilogue() const {
1306     return isScalarEpilogueAllowed() && InterleaveInfo.requiresScalarEpilogue();
1307   }
1308 
1309   /// Returns true if a scalar epilogue is not allowed due to optsize or a
1310   /// loop hint annotation.
1311   bool isScalarEpilogueAllowed() const {
1312     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1313   }
1314 
1315   /// Returns true if all loop blocks should be masked to fold tail loop.
1316   bool foldTailByMasking() const { return FoldTailByMasking; }
1317 
1318   bool blockNeedsPredication(BasicBlock *BB) {
1319     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1320   }
1321 
1322   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1323   /// with factor VF.  Return the cost of the instruction, including
1324   /// scalarization overhead if it's needed.
1325   unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF);
1326 
1327   /// Estimate cost of a call instruction CI if it were vectorized with factor
1328   /// VF. Return the cost of the instruction, including scalarization overhead
1329   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1330   /// scalarized -
1331   /// i.e. either vector version isn't available, or is too expensive.
1332   unsigned getVectorCallCost(CallInst *CI, unsigned VF, bool &NeedToScalarize);
1333 
1334   /// Invalidates decisions already taken by the cost model.
1335   void invalidateCostModelingDecisions() {
1336     WideningDecisions.clear();
1337     Uniforms.clear();
1338     Scalars.clear();
1339   }
1340 
1341 private:
1342   unsigned NumPredStores = 0;
1343 
1344   /// \return An upper bound for the vectorization factor, a power-of-2 larger
1345   /// than zero. One is returned if vectorization should best be avoided due
1346   /// to cost.
1347   unsigned computeFeasibleMaxVF(unsigned ConstTripCount);
1348 
1349   /// The vectorization cost is a combination of the cost itself and a boolean
1350   /// indicating whether any of the contributing operations will actually
1351   /// operate on
1352   /// vector values after type legalization in the backend. If this latter value
1353   /// is
1354   /// false, then all operations will be scalarized (i.e. no vectorization has
1355   /// actually taken place).
1356   using VectorizationCostTy = std::pair<unsigned, bool>;
1357 
1358   /// Returns the expected execution cost. The unit of the cost does
1359   /// not matter because we use the 'cost' units to compare different
1360   /// vector widths. The cost that is returned is *not* normalized by
1361   /// the factor width.
1362   VectorizationCostTy expectedCost(unsigned VF);
1363 
1364   /// Returns the execution time cost of an instruction for a given vector
1365   /// width. Vector width of one means scalar.
1366   VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
1367 
1368   /// The cost-computation logic from getInstructionCost which provides
1369   /// the vector type as an output parameter.
1370   unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
1371 
1372   /// Calculate vectorization cost of memory instruction \p I.
1373   unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);
1374 
1375   /// The cost computation for scalarized memory instruction.
1376   unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);
1377 
1378   /// The cost computation for interleaving group of memory instructions.
1379   unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);
1380 
1381   /// The cost computation for Gather/Scatter instruction.
1382   unsigned getGatherScatterCost(Instruction *I, unsigned VF);
1383 
1384   /// The cost computation for widening instruction \p I with consecutive
1385   /// memory access.
1386   unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
1387 
1388   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1389   /// Load: scalar load + broadcast.
1390   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1391   /// element)
1392   unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
1393 
1394   /// Estimate the overhead of scalarizing an instruction. This is a
1395   /// convenience wrapper for the type-based getScalarizationOverhead API.
1396   unsigned getScalarizationOverhead(Instruction *I, unsigned VF);
1397 
1398   /// Returns whether the instruction is a load or store and will be a emitted
1399   /// as a vector operation.
1400   bool isConsecutiveLoadOrStore(Instruction *I);
1401 
1402   /// Returns true if an artificially high cost for emulated masked memrefs
1403   /// should be used.
1404   bool useEmulatedMaskMemRefHack(Instruction *I);
1405 
1406   /// Map of scalar integer values to the smallest bitwidth they can be legally
1407   /// represented as. The vector equivalents of these values should be truncated
1408   /// to this type.
1409   MapVector<Instruction *, uint64_t> MinBWs;
1410 
1411   /// A type representing the costs for instructions if they were to be
1412   /// scalarized rather than vectorized. The entries are Instruction-Cost
1413   /// pairs.
1414   using ScalarCostsTy = DenseMap<Instruction *, unsigned>;
1415 
1416   /// A set containing all BasicBlocks that are known to present after
1417   /// vectorization as a predicated block.
1418   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1419 
1420   /// Records whether it is allowed to have the original scalar loop execute at
1421   /// least once. This may be needed as a fallback loop in case runtime
1422   /// aliasing/dependence checks fail, or to handle the tail/remainder
1423   /// iterations when the trip count is unknown or doesn't divide by the VF,
1424   /// or as a peel-loop to handle gaps in interleave-groups.
1425   /// Under optsize and when the trip count is very small we don't allow any
1426   /// iterations to execute in the scalar loop.
1427   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1428 
1429   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1430   bool FoldTailByMasking = false;
1431 
1432   /// A map holding scalar costs for different vectorization factors. The
1433   /// presence of a cost for an instruction in the mapping indicates that the
1434   /// instruction will be scalarized when vectorizing with the associated
1435   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1436   DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;
1437 
1438   /// Holds the instructions known to be uniform after vectorization.
1439   /// The data is collected per VF.
1440   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;
1441 
1442   /// Holds the instructions known to be scalar after vectorization.
1443   /// The data is collected per VF.
1444   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;
1445 
1446   /// Holds the instructions (address computations) that are forced to be
1447   /// scalarized.
1448   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1449 
1450   /// Returns the expected difference in cost from scalarizing the expression
1451   /// feeding a predicated instruction \p PredInst. The instructions to
1452   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1453   /// non-negative return value implies the expression will be scalarized.
1454   /// Currently, only single-use chains are considered for scalarization.
1455   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1456                               unsigned VF);
1457 
1458   /// Collect the instructions that are uniform after vectorization. An
1459   /// instruction is uniform if we represent it with a single scalar value in
1460   /// the vectorized loop corresponding to each vector iteration. Examples of
1461   /// uniform instructions include pointer operands of consecutive or
1462   /// interleaved memory accesses. Note that although uniformity implies an
1463   /// instruction will be scalar, the reverse is not true. In general, a
1464   /// scalarized instruction will be represented by VF scalar values in the
1465   /// vectorized loop, each corresponding to an iteration of the original
1466   /// scalar loop.
1467   void collectLoopUniforms(unsigned VF);
1468 
1469   /// Collect the instructions that are scalar after vectorization. An
1470   /// instruction is scalar if it is known to be uniform or will be scalarized
1471   /// during vectorization. Non-uniform scalarized instructions will be
1472   /// represented by VF values in the vectorized loop, each corresponding to an
1473   /// iteration of the original scalar loop.
1474   void collectLoopScalars(unsigned VF);
1475 
1476   /// Keeps cost model vectorization decision and cost for instructions.
1477   /// Right now it is used for memory instructions only.
1478   using DecisionList = DenseMap<std::pair<Instruction *, unsigned>,
1479                                 std::pair<InstWidening, unsigned>>;
1480 
1481   DecisionList WideningDecisions;
1482 
1483   /// Returns true if \p V is expected to be vectorized and it needs to be
1484   /// extracted.
1485   bool needsExtract(Value *V, unsigned VF) const {
1486     Instruction *I = dyn_cast<Instruction>(V);
1487     if (VF == 1 || !I || !TheLoop->contains(I) || TheLoop->isLoopInvariant(I))
1488       return false;
1489 
1490     // Assume we can vectorize V (and hence we need extraction) if the
1491     // scalars are not computed yet. This can happen, because it is called
1492     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1493     // the scalars are collected. That should be a safe assumption in most
1494     // cases, because we check if the operands have vectorizable types
1495     // beforehand in LoopVectorizationLegality.
1496     return Scalars.find(VF) == Scalars.end() ||
1497            !isScalarAfterVectorization(I, VF);
1498   };
1499 
1500   /// Returns a range containing only operands needing to be extracted.
1501   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1502                                                    unsigned VF) {
1503     return SmallVector<Value *, 4>(make_filter_range(
1504         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1505   }
1506 
1507 public:
1508   /// The loop that we evaluate.
1509   Loop *TheLoop;
1510 
1511   /// Predicated scalar evolution analysis.
1512   PredicatedScalarEvolution &PSE;
1513 
1514   /// Loop Info analysis.
1515   LoopInfo *LI;
1516 
1517   /// Vectorization legality.
1518   LoopVectorizationLegality *Legal;
1519 
1520   /// Vector target information.
1521   const TargetTransformInfo &TTI;
1522 
1523   /// Target Library Info.
1524   const TargetLibraryInfo *TLI;
1525 
1526   /// Demanded bits analysis.
1527   DemandedBits *DB;
1528 
1529   /// Assumption cache.
1530   AssumptionCache *AC;
1531 
1532   /// Interface to emit optimization remarks.
1533   OptimizationRemarkEmitter *ORE;
1534 
1535   const Function *TheFunction;
1536 
1537   /// Loop Vectorize Hint.
1538   const LoopVectorizeHints *Hints;
1539 
1540   /// The interleave access information contains groups of interleaved accesses
1541   /// with the same stride and close to each other.
1542   InterleavedAccessInfo &InterleaveInfo;
1543 
1544   /// Values to ignore in the cost model.
1545   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1546 
1547   /// Values to ignore in the cost model when VF > 1.
1548   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1549 };
1550 
1551 } // end namespace llvm
1552 
1553 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
1554 // vectorization. The loop needs to be annotated with #pragma omp simd
1555 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
1556 // vector length information is not provided, vectorization is not considered
1557 // explicit. Interleave hints are not allowed either. These limitations will be
1558 // relaxed in the future.
1559 // Please, note that we are currently forced to abuse the pragma 'clang
1560 // vectorize' semantics. This pragma provides *auto-vectorization hints*
1561 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1562 // provides *explicit vectorization hints* (LV can bypass legal checks and
1563 // assume that vectorization is legal). However, both hints are implemented
1564 // using the same metadata (llvm.loop.vectorize, processed by
1565 // LoopVectorizeHints). This will be fixed in the future when the native IR
1566 // representation for pragma 'omp simd' is introduced.
1567 static bool isExplicitVecOuterLoop(Loop *OuterLp,
1568                                    OptimizationRemarkEmitter *ORE) {
1569   assert(!OuterLp->empty() && "This is not an outer loop");
1570   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1571 
1572   // Only outer loops with an explicit vectorization hint are supported.
1573   // Unannotated outer loops are ignored.
1574   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1575     return false;
1576 
1577   Function *Fn = OuterLp->getHeader()->getParent();
1578   if (!Hints.allowVectorization(Fn, OuterLp,
1579                                 true /*VectorizeOnlyWhenForced*/)) {
1580     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1581     return false;
1582   }
1583 
1584   if (Hints.getInterleave() > 1) {
1585     // TODO: Interleave support is future work.
1586     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1587                          "outer loops.\n");
1588     Hints.emitRemarkWithHints();
1589     return false;
1590   }
1591 
1592   return true;
1593 }
1594 
1595 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
1596                                   OptimizationRemarkEmitter *ORE,
1597                                   SmallVectorImpl<Loop *> &V) {
1598   // Collect inner loops and outer loops without irreducible control flow. For
1599   // now, only collect outer loops that have explicit vectorization hints. If we
1600   // are stress testing the VPlan H-CFG construction, we collect the outermost
1601   // loop of every loop nest.
1602   if (L.empty() || VPlanBuildStressTest ||
1603       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
1604     LoopBlocksRPO RPOT(&L);
1605     RPOT.perform(LI);
1606     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
1607       V.push_back(&L);
1608       // TODO: Collect inner loops inside marked outer loops in case
1609       // vectorization fails for the outer loop. Do not invoke
1610       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1611       // already known to be reducible. We can use an inherited attribute for
1612       // that.
1613       return;
1614     }
1615   }
1616   for (Loop *InnerL : L)
1617     collectSupportedLoops(*InnerL, LI, ORE, V);
1618 }
1619 
1620 namespace {
1621 
1622 /// The LoopVectorize Pass.
1623 struct LoopVectorize : public FunctionPass {
1624   /// Pass identification, replacement for typeid
1625   static char ID;
1626 
1627   LoopVectorizePass Impl;
1628 
1629   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
1630                          bool VectorizeOnlyWhenForced = false)
1631       : FunctionPass(ID),
1632         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
1633     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
1634   }
1635 
1636   bool runOnFunction(Function &F) override {
1637     if (skipFunction(F))
1638       return false;
1639 
1640     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1641     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1642     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1643     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1644     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
1645     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
1646     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
1647     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1648     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1649     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
1650     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
1651     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
1652     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
1653 
1654     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
1655         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
1656 
1657     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
1658                         GetLAA, *ORE, PSI).MadeAnyChange;
1659   }
1660 
1661   void getAnalysisUsage(AnalysisUsage &AU) const override {
1662     AU.addRequired<AssumptionCacheTracker>();
1663     AU.addRequired<BlockFrequencyInfoWrapperPass>();
1664     AU.addRequired<DominatorTreeWrapperPass>();
1665     AU.addRequired<LoopInfoWrapperPass>();
1666     AU.addRequired<ScalarEvolutionWrapperPass>();
1667     AU.addRequired<TargetTransformInfoWrapperPass>();
1668     AU.addRequired<AAResultsWrapperPass>();
1669     AU.addRequired<LoopAccessLegacyAnalysis>();
1670     AU.addRequired<DemandedBitsWrapperPass>();
1671     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
1672     AU.addRequired<InjectTLIMappingsLegacy>();
1673 
1674     // We currently do not preserve loopinfo/dominator analyses with outer loop
1675     // vectorization. Until this is addressed, mark these analyses as preserved
1676     // only for non-VPlan-native path.
1677     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
1678     if (!EnableVPlanNativePath) {
1679       AU.addPreserved<LoopInfoWrapperPass>();
1680       AU.addPreserved<DominatorTreeWrapperPass>();
1681     }
1682 
1683     AU.addPreserved<BasicAAWrapperPass>();
1684     AU.addPreserved<GlobalsAAWrapperPass>();
1685     AU.addRequired<ProfileSummaryInfoWrapperPass>();
1686   }
1687 };
1688 
1689 } // end anonymous namespace
1690 
1691 //===----------------------------------------------------------------------===//
1692 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
1693 // LoopVectorizationCostModel and LoopVectorizationPlanner.
1694 //===----------------------------------------------------------------------===//
1695 
1696 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
1697   // We need to place the broadcast of invariant variables outside the loop,
1698   // but only if it's proven safe to do so. Else, broadcast will be inside
1699   // vector loop body.
1700   Instruction *Instr = dyn_cast<Instruction>(V);
1701   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
1702                      (!Instr ||
1703                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
1704   // Place the code for broadcasting invariant variables in the new preheader.
1705   IRBuilder<>::InsertPointGuard Guard(Builder);
1706   if (SafeToHoist)
1707     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1708 
1709   // Broadcast the scalar into all locations in the vector.
1710   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
1711 
1712   return Shuf;
1713 }
1714 
1715 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
1716     const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
1717   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1718          "Expected either an induction phi-node or a truncate of it!");
1719   Value *Start = II.getStartValue();
1720 
1721   // Construct the initial value of the vector IV in the vector loop preheader
1722   auto CurrIP = Builder.saveIP();
1723   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1724   if (isa<TruncInst>(EntryVal)) {
1725     assert(Start->getType()->isIntegerTy() &&
1726            "Truncation requires an integer type");
1727     auto *TruncType = cast<IntegerType>(EntryVal->getType());
1728     Step = Builder.CreateTrunc(Step, TruncType);
1729     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
1730   }
1731   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
1732   Value *SteppedStart =
1733       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
1734 
1735   // We create vector phi nodes for both integer and floating-point induction
1736   // variables. Here, we determine the kind of arithmetic we will perform.
1737   Instruction::BinaryOps AddOp;
1738   Instruction::BinaryOps MulOp;
1739   if (Step->getType()->isIntegerTy()) {
1740     AddOp = Instruction::Add;
1741     MulOp = Instruction::Mul;
1742   } else {
1743     AddOp = II.getInductionOpcode();
1744     MulOp = Instruction::FMul;
1745   }
1746 
1747   // Multiply the vectorization factor by the step using integer or
1748   // floating-point arithmetic as appropriate.
1749   Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF);
1750   Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
1751 
1752   // Create a vector splat to use in the induction update.
1753   //
1754   // FIXME: If the step is non-constant, we create the vector splat with
1755   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
1756   //        handle a constant vector splat.
1757   Value *SplatVF =
1758       isa<Constant>(Mul)
1759           ? ConstantVector::getSplat({VF, false}, cast<Constant>(Mul))
1760           : Builder.CreateVectorSplat(VF, Mul);
1761   Builder.restoreIP(CurrIP);
1762 
1763   // We may need to add the step a number of times, depending on the unroll
1764   // factor. The last of those goes into the PHI.
1765   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
1766                                     &*LoopVectorBody->getFirstInsertionPt());
1767   VecInd->setDebugLoc(EntryVal->getDebugLoc());
1768   Instruction *LastInduction = VecInd;
1769   for (unsigned Part = 0; Part < UF; ++Part) {
1770     VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);
1771 
1772     if (isa<TruncInst>(EntryVal))
1773       addMetadata(LastInduction, EntryVal);
1774     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part);
1775 
1776     LastInduction = cast<Instruction>(addFastMathFlag(
1777         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
1778     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
1779   }
1780 
1781   // Move the last step to the end of the latch block. This ensures consistent
1782   // placement of all induction updates.
1783   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
1784   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
1785   auto *ICmp = cast<Instruction>(Br->getCondition());
1786   LastInduction->moveBefore(ICmp);
1787   LastInduction->setName("vec.ind.next");
1788 
1789   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
1790   VecInd->addIncoming(LastInduction, LoopVectorLatch);
1791 }
1792 
1793 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
1794   return Cost->isScalarAfterVectorization(I, VF) ||
1795          Cost->isProfitableToScalarize(I, VF);
1796 }
1797 
1798 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
1799   if (shouldScalarizeInstruction(IV))
1800     return true;
1801   auto isScalarInst = [&](User *U) -> bool {
1802     auto *I = cast<Instruction>(U);
1803     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
1804   };
1805   return llvm::any_of(IV->users(), isScalarInst);
1806 }
1807 
1808 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
1809     const InductionDescriptor &ID, const Instruction *EntryVal,
1810     Value *VectorLoopVal, unsigned Part, unsigned Lane) {
1811   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1812          "Expected either an induction phi-node or a truncate of it!");
1813 
1814   // This induction variable is not the phi from the original loop but the
1815   // newly-created IV based on the proof that casted Phi is equal to the
1816   // uncasted Phi in the vectorized loop (under a runtime guard possibly). It
1817   // re-uses the same InductionDescriptor that original IV uses but we don't
1818   // have to do any recording in this case - that is done when original IV is
1819   // processed.
1820   if (isa<TruncInst>(EntryVal))
1821     return;
1822 
1823   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
1824   if (Casts.empty())
1825     return;
1826   // Only the first Cast instruction in the Casts vector is of interest.
1827   // The rest of the Casts (if exist) have no uses outside the
1828   // induction update chain itself.
1829   Instruction *CastInst = *Casts.begin();
1830   if (Lane < UINT_MAX)
1831     VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal);
1832   else
1833     VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal);
1834 }
1835 
1836 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
1837   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
1838          "Primary induction variable must have an integer type");
1839 
1840   auto II = Legal->getInductionVars().find(IV);
1841   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
1842 
1843   auto ID = II->second;
1844   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
1845 
1846   // The value from the original loop to which we are mapping the new induction
1847   // variable.
1848   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
1849 
1850   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
1851 
1852   // Generate code for the induction step. Note that induction steps are
1853   // required to be loop-invariant
1854   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
1855     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
1856            "Induction step should be loop invariant");
1857     if (PSE.getSE()->isSCEVable(IV->getType())) {
1858       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
1859       return Exp.expandCodeFor(Step, Step->getType(),
1860                                LoopVectorPreHeader->getTerminator());
1861     }
1862     return cast<SCEVUnknown>(Step)->getValue();
1863   };
1864 
1865   // The scalar value to broadcast. This is derived from the canonical
1866   // induction variable. If a truncation type is given, truncate the canonical
1867   // induction variable and step. Otherwise, derive these values from the
1868   // induction descriptor.
1869   auto CreateScalarIV = [&](Value *&Step) -> Value * {
1870     Value *ScalarIV = Induction;
1871     if (IV != OldInduction) {
1872       ScalarIV = IV->getType()->isIntegerTy()
1873                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
1874                      : Builder.CreateCast(Instruction::SIToFP, Induction,
1875                                           IV->getType());
1876       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
1877       ScalarIV->setName("offset.idx");
1878     }
1879     if (Trunc) {
1880       auto *TruncType = cast<IntegerType>(Trunc->getType());
1881       assert(Step->getType()->isIntegerTy() &&
1882              "Truncation requires an integer step");
1883       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
1884       Step = Builder.CreateTrunc(Step, TruncType);
1885     }
1886     return ScalarIV;
1887   };
1888 
1889   // Create the vector values from the scalar IV, in the absence of creating a
1890   // vector IV.
1891   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
1892     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
1893     for (unsigned Part = 0; Part < UF; ++Part) {
1894       Value *EntryPart =
1895           getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode());
1896       VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart);
1897       if (Trunc)
1898         addMetadata(EntryPart, Trunc);
1899       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part);
1900     }
1901   };
1902 
1903   // Now do the actual transformations, and start with creating the step value.
1904   Value *Step = CreateStepValue(ID.getStep());
1905   if (VF <= 1) {
1906     Value *ScalarIV = CreateScalarIV(Step);
1907     CreateSplatIV(ScalarIV, Step);
1908     return;
1909   }
1910 
1911   // Determine if we want a scalar version of the induction variable. This is
1912   // true if the induction variable itself is not widened, or if it has at
1913   // least one user in the loop that is not widened.
1914   auto NeedsScalarIV = needsScalarInduction(EntryVal);
1915   if (!NeedsScalarIV) {
1916     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
1917     return;
1918   }
1919 
1920   // Try to create a new independent vector induction variable. If we can't
1921   // create the phi node, we will splat the scalar induction variable in each
1922   // loop iteration.
1923   if (!shouldScalarizeInstruction(EntryVal)) {
1924     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
1925     Value *ScalarIV = CreateScalarIV(Step);
1926     // Create scalar steps that can be used by instructions we will later
1927     // scalarize. Note that the addition of the scalar steps will not increase
1928     // the number of instructions in the loop in the common case prior to
1929     // InstCombine. We will be trading one vector extract for each scalar step.
1930     buildScalarSteps(ScalarIV, Step, EntryVal, ID);
1931     return;
1932   }
1933 
1934   // All IV users are scalar instructions, so only emit a scalar IV, not a
1935   // vectorised IV. Except when we tail-fold, then the splat IV feeds the
1936   // predicate used by the masked loads/stores.
1937   Value *ScalarIV = CreateScalarIV(Step);
1938   if (!Cost->isScalarEpilogueAllowed())
1939     CreateSplatIV(ScalarIV, Step);
1940   buildScalarSteps(ScalarIV, Step, EntryVal, ID);
1941 }
1942 
1943 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
1944                                           Instruction::BinaryOps BinOp) {
1945   // Create and check the types.
1946   auto *ValVTy = cast<VectorType>(Val->getType());
1947   int VLen = ValVTy->getNumElements();
1948 
1949   Type *STy = Val->getType()->getScalarType();
1950   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
1951          "Induction Step must be an integer or FP");
1952   assert(Step->getType() == STy && "Step has wrong type");
1953 
1954   SmallVector<Constant *, 8> Indices;
1955 
1956   if (STy->isIntegerTy()) {
1957     // Create a vector of consecutive numbers from zero to VF.
1958     for (int i = 0; i < VLen; ++i)
1959       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
1960 
1961     // Add the consecutive indices to the vector value.
1962     Constant *Cv = ConstantVector::get(Indices);
1963     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
1964     Step = Builder.CreateVectorSplat(VLen, Step);
1965     assert(Step->getType() == Val->getType() && "Invalid step vec");
1966     // FIXME: The newly created binary instructions should contain nsw/nuw flags,
1967     // which can be found from the original scalar operations.
1968     Step = Builder.CreateMul(Cv, Step);
1969     return Builder.CreateAdd(Val, Step, "induction");
1970   }
1971 
1972   // Floating point induction.
1973   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
1974          "Binary Opcode should be specified for FP induction");
1975   // Create a vector of consecutive numbers from zero to VF.
1976   for (int i = 0; i < VLen; ++i)
1977     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
1978 
1979   // Add the consecutive indices to the vector value.
1980   Constant *Cv = ConstantVector::get(Indices);
1981 
1982   Step = Builder.CreateVectorSplat(VLen, Step);
1983 
1984   // Floating point operations had to be 'fast' to enable the induction.
1985   FastMathFlags Flags;
1986   Flags.setFast();
1987 
1988   Value *MulOp = Builder.CreateFMul(Cv, Step);
1989   if (isa<Instruction>(MulOp))
1990     // Have to check, MulOp may be a constant
1991     cast<Instruction>(MulOp)->setFastMathFlags(Flags);
1992 
1993   Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
1994   if (isa<Instruction>(BOp))
1995     cast<Instruction>(BOp)->setFastMathFlags(Flags);
1996   return BOp;
1997 }
1998 
1999 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2000                                            Instruction *EntryVal,
2001                                            const InductionDescriptor &ID) {
2002   // We shouldn't have to build scalar steps if we aren't vectorizing.
2003   assert(VF > 1 && "VF should be greater than one");
2004 
2005   // Get the value type and ensure it and the step have the same integer type.
2006   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2007   assert(ScalarIVTy == Step->getType() &&
2008          "Val and Step should have the same type");
2009 
2010   // We build scalar steps for both integer and floating-point induction
2011   // variables. Here, we determine the kind of arithmetic we will perform.
2012   Instruction::BinaryOps AddOp;
2013   Instruction::BinaryOps MulOp;
2014   if (ScalarIVTy->isIntegerTy()) {
2015     AddOp = Instruction::Add;
2016     MulOp = Instruction::Mul;
2017   } else {
2018     AddOp = ID.getInductionOpcode();
2019     MulOp = Instruction::FMul;
2020   }
2021 
2022   // Determine the number of scalars we need to generate for each unroll
2023   // iteration. If EntryVal is uniform, we only need to generate the first
2024   // lane. Otherwise, we generate all VF values.
2025   unsigned Lanes =
2026       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1
2027                                                                          : VF;
2028   // Compute the scalar steps and save the results in VectorLoopValueMap.
2029   for (unsigned Part = 0; Part < UF; ++Part) {
2030     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2031       auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
2032       auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
2033       auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
2034       VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add);
2035       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane);
2036     }
2037   }
2038 }
2039 
2040 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
2041   assert(V != Induction && "The new induction variable should not be used.");
2042   assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2043   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2044 
2045   // If we have a stride that is replaced by one, do it here. Defer this for
2046   // the VPlan-native path until we start running Legal checks in that path.
2047   if (!EnableVPlanNativePath && Legal->hasStride(V))
2048     V = ConstantInt::get(V->getType(), 1);
2049 
2050   // If we have a vector mapped to this value, return it.
2051   if (VectorLoopValueMap.hasVectorValue(V, Part))
2052     return VectorLoopValueMap.getVectorValue(V, Part);
2053 
2054   // If the value has not been vectorized, check if it has been scalarized
2055   // instead. If it has been scalarized, and we actually need the value in
2056   // vector form, we will construct the vector values on demand.
2057   if (VectorLoopValueMap.hasAnyScalarValue(V)) {
2058     Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0});
2059 
2060     // If we've scalarized a value, that value should be an instruction.
2061     auto *I = cast<Instruction>(V);
2062 
2063     // If we aren't vectorizing, we can just copy the scalar map values over to
2064     // the vector map.
2065     if (VF == 1) {
2066       VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
2067       return ScalarValue;
2068     }
2069 
2070     // Get the last scalar instruction we generated for V and Part. If the value
2071     // is known to be uniform after vectorization, this corresponds to lane zero
2072     // of the Part unroll iteration. Otherwise, the last instruction is the one
2073     // we created for the last vector lane of the Part unroll iteration.
2074     unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
2075     auto *LastInst = cast<Instruction>(
2076         VectorLoopValueMap.getScalarValue(V, {Part, LastLane}));
2077 
2078     // Set the insert point after the last scalarized instruction. This ensures
2079     // the insertelement sequence will directly follow the scalar definitions.
2080     auto OldIP = Builder.saveIP();
2081     auto NewIP = std::next(BasicBlock::iterator(LastInst));
2082     Builder.SetInsertPoint(&*NewIP);
2083 
2084     // However, if we are vectorizing, we need to construct the vector values.
2085     // If the value is known to be uniform after vectorization, we can just
2086     // broadcast the scalar value corresponding to lane zero for each unroll
2087     // iteration. Otherwise, we construct the vector values using insertelement
2088     // instructions. Since the resulting vectors are stored in
2089     // VectorLoopValueMap, we will only generate the insertelements once.
2090     Value *VectorValue = nullptr;
2091     if (Cost->isUniformAfterVectorization(I, VF)) {
2092       VectorValue = getBroadcastInstrs(ScalarValue);
2093       VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
2094     } else {
2095       // Initialize packing with insertelements to start from undef.
2096       Value *Undef = UndefValue::get(FixedVectorType::get(V->getType(), VF));
2097       VectorLoopValueMap.setVectorValue(V, Part, Undef);
2098       for (unsigned Lane = 0; Lane < VF; ++Lane)
2099         packScalarIntoVectorValue(V, {Part, Lane});
2100       VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
2101     }
2102     Builder.restoreIP(OldIP);
2103     return VectorValue;
2104   }
2105 
2106   // If this scalar is unknown, assume that it is a constant or that it is
2107   // loop invariant. Broadcast V and save the value for future uses.
2108   Value *B = getBroadcastInstrs(V);
2109   VectorLoopValueMap.setVectorValue(V, Part, B);
2110   return B;
2111 }
2112 
2113 Value *
2114 InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
2115                                             const VPIteration &Instance) {
2116   // If the value is not an instruction contained in the loop, it should
2117   // already be scalar.
2118   if (OrigLoop->isLoopInvariant(V))
2119     return V;
2120 
2121   assert(Instance.Lane > 0
2122              ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)
2123              : true && "Uniform values only have lane zero");
2124 
2125   // If the value from the original loop has not been vectorized, it is
2126   // represented by UF x VF scalar values in the new loop. Return the requested
2127   // scalar value.
2128   if (VectorLoopValueMap.hasScalarValue(V, Instance))
2129     return VectorLoopValueMap.getScalarValue(V, Instance);
2130 
2131   // If the value has not been scalarized, get its entry in VectorLoopValueMap
2132   // for the given unroll part. If this entry is not a vector type (i.e., the
2133   // vectorization factor is one), there is no need to generate an
2134   // extractelement instruction.
2135   auto *U = getOrCreateVectorValue(V, Instance.Part);
2136   if (!U->getType()->isVectorTy()) {
2137     assert(VF == 1 && "Value not scalarized has non-vector type");
2138     return U;
2139   }
2140 
2141   // Otherwise, the value from the original loop has been vectorized and is
2142   // represented by UF vector values. Extract and return the requested scalar
2143   // value from the appropriate vector lane.
2144   return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
2145 }
2146 
2147 void InnerLoopVectorizer::packScalarIntoVectorValue(
2148     Value *V, const VPIteration &Instance) {
2149   assert(V != Induction && "The new induction variable should not be used.");
2150   assert(!V->getType()->isVectorTy() && "Can't pack a vector");
2151   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2152 
2153   Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
2154   Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
2155   VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
2156                                             Builder.getInt32(Instance.Lane));
2157   VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
2158 }
2159 
2160 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2161   assert(Vec->getType()->isVectorTy() && "Invalid type");
2162   SmallVector<int, 8> ShuffleMask;
2163   for (unsigned i = 0; i < VF; ++i)
2164     ShuffleMask.push_back(VF - i - 1);
2165 
2166   return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
2167                                      ShuffleMask, "reverse");
2168 }
2169 
2170 // Return whether we allow using masked interleave-groups (for dealing with
2171 // strided loads/stores that reside in predicated blocks, or for dealing
2172 // with gaps).
2173 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2174   // If an override option has been passed in for interleaved accesses, use it.
2175   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2176     return EnableMaskedInterleavedMemAccesses;
2177 
2178   return TTI.enableMaskedInterleavedAccessVectorization();
2179 }
2180 
2181 // Try to vectorize the interleave group that \p Instr belongs to.
2182 //
2183 // E.g. Translate following interleaved load group (factor = 3):
2184 //   for (i = 0; i < N; i+=3) {
2185 //     R = Pic[i];             // Member of index 0
2186 //     G = Pic[i+1];           // Member of index 1
2187 //     B = Pic[i+2];           // Member of index 2
2188 //     ... // do something to R, G, B
2189 //   }
2190 // To:
2191 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2192 //   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
2193 //   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
2194 //   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
2195 //
2196 // Or translate following interleaved store group (factor = 3):
2197 //   for (i = 0; i < N; i+=3) {
2198 //     ... do something to R, G, B
2199 //     Pic[i]   = R;           // Member of index 0
2200 //     Pic[i+1] = G;           // Member of index 1
2201 //     Pic[i+2] = B;           // Member of index 2
2202 //   }
2203 // To:
2204 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2205 //   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2206 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2207 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2208 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2209 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2210     const InterleaveGroup<Instruction> *Group, VPTransformState &State,
2211     VPValue *Addr, VPValue *BlockInMask) {
2212   Instruction *Instr = Group->getInsertPos();
2213   const DataLayout &DL = Instr->getModule()->getDataLayout();
2214 
2215   // Prepare for the vector type of the interleaved load/store.
2216   Type *ScalarTy = getMemInstValueType(Instr);
2217   unsigned InterleaveFactor = Group->getFactor();
2218   auto *VecTy = FixedVectorType::get(ScalarTy, InterleaveFactor * VF);
2219 
2220   // Prepare for the new pointers.
2221   SmallVector<Value *, 2> AddrParts;
2222   unsigned Index = Group->getIndex(Instr);
2223 
2224   // TODO: extend the masked interleaved-group support to reversed access.
2225   assert((!BlockInMask || !Group->isReverse()) &&
2226          "Reversed masked interleave-group not supported.");
2227 
2228   // If the group is reverse, adjust the index to refer to the last vector lane
2229   // instead of the first. We adjust the index from the first vector lane,
2230   // rather than directly getting the pointer for lane VF - 1, because the
2231   // pointer operand of the interleaved access is supposed to be uniform. For
2232   // uniform instructions, we're only required to generate a value for the
2233   // first vector lane in each unroll iteration.
2234   if (Group->isReverse())
2235     Index += (VF - 1) * Group->getFactor();
2236 
2237   for (unsigned Part = 0; Part < UF; Part++) {
2238     Value *AddrPart = State.get(Addr, {Part, 0});
2239     setDebugLocFromInst(Builder, AddrPart);
2240 
2241     // Notice current instruction could be any index. Need to adjust the address
2242     // to the member of index 0.
2243     //
2244     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2245     //       b = A[i];       // Member of index 0
2246     // Current pointer is pointed to A[i+1], adjust it to A[i].
2247     //
2248     // E.g.  A[i+1] = a;     // Member of index 1
2249     //       A[i]   = b;     // Member of index 0
2250     //       A[i+2] = c;     // Member of index 2 (Current instruction)
2251     // Current pointer is pointed to A[i+2], adjust it to A[i].
2252 
2253     bool InBounds = false;
2254     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2255       InBounds = gep->isInBounds();
2256     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2257     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2258 
2259     // Cast to the vector pointer type.
2260     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2261     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2262     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2263   }
2264 
2265   setDebugLocFromInst(Builder, Instr);
2266   Value *UndefVec = UndefValue::get(VecTy);
2267 
2268   Value *MaskForGaps = nullptr;
2269   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2270     MaskForGaps = createBitMaskForGaps(Builder, VF, *Group);
2271     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2272   }
2273 
2274   // Vectorize the interleaved load group.
2275   if (isa<LoadInst>(Instr)) {
2276     // For each unroll part, create a wide load for the group.
2277     SmallVector<Value *, 2> NewLoads;
2278     for (unsigned Part = 0; Part < UF; Part++) {
2279       Instruction *NewLoad;
2280       if (BlockInMask || MaskForGaps) {
2281         assert(useMaskedInterleavedAccesses(*TTI) &&
2282                "masked interleaved groups are not allowed.");
2283         Value *GroupMask = MaskForGaps;
2284         if (BlockInMask) {
2285           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2286           auto *Undefs = UndefValue::get(BlockInMaskPart->getType());
2287           Value *ShuffledMask = Builder.CreateShuffleVector(
2288               BlockInMaskPart, Undefs,
2289               createReplicatedMask(InterleaveFactor, VF), "interleaved.mask");
2290           GroupMask = MaskForGaps
2291                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2292                                                 MaskForGaps)
2293                           : ShuffledMask;
2294         }
2295         NewLoad =
2296             Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2297                                      GroupMask, UndefVec, "wide.masked.vec");
2298       }
2299       else
2300         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2301                                             Group->getAlign(), "wide.vec");
2302       Group->addMetadata(NewLoad);
2303       NewLoads.push_back(NewLoad);
2304     }
2305 
2306     // For each member in the group, shuffle out the appropriate data from the
2307     // wide loads.
2308     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2309       Instruction *Member = Group->getMember(I);
2310 
2311       // Skip the gaps in the group.
2312       if (!Member)
2313         continue;
2314 
2315       auto StrideMask = createStrideMask(I, InterleaveFactor, VF);
2316       for (unsigned Part = 0; Part < UF; Part++) {
2317         Value *StridedVec = Builder.CreateShuffleVector(
2318             NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2319 
2320         // If this member has different type, cast the result type.
2321         if (Member->getType() != ScalarTy) {
2322           VectorType *OtherVTy = FixedVectorType::get(Member->getType(), VF);
2323           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2324         }
2325 
2326         if (Group->isReverse())
2327           StridedVec = reverseVector(StridedVec);
2328 
2329         VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
2330       }
2331     }
2332     return;
2333   }
2334 
2335   // The sub vector type for current instruction.
2336   auto *SubVT = FixedVectorType::get(ScalarTy, VF);
2337 
2338   // Vectorize the interleaved store group.
2339   for (unsigned Part = 0; Part < UF; Part++) {
2340     // Collect the stored vector from each member.
2341     SmallVector<Value *, 4> StoredVecs;
2342     for (unsigned i = 0; i < InterleaveFactor; i++) {
2343       // Interleaved store group doesn't allow a gap, so each index has a member
2344       Instruction *Member = Group->getMember(i);
2345       assert(Member && "Fail to get a member from an interleaved store group");
2346 
2347       Value *StoredVec = getOrCreateVectorValue(
2348           cast<StoreInst>(Member)->getValueOperand(), Part);
2349       if (Group->isReverse())
2350         StoredVec = reverseVector(StoredVec);
2351 
2352       // If this member has different type, cast it to a unified type.
2353 
2354       if (StoredVec->getType() != SubVT)
2355         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2356 
2357       StoredVecs.push_back(StoredVec);
2358     }
2359 
2360     // Concatenate all vectors into a wide vector.
2361     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2362 
2363     // Interleave the elements in the wide vector.
2364     Value *IVec = Builder.CreateShuffleVector(
2365         WideVec, UndefVec, createInterleaveMask(VF, InterleaveFactor),
2366         "interleaved.vec");
2367 
2368     Instruction *NewStoreInstr;
2369     if (BlockInMask) {
2370       Value *BlockInMaskPart = State.get(BlockInMask, Part);
2371       auto *Undefs = UndefValue::get(BlockInMaskPart->getType());
2372       Value *ShuffledMask = Builder.CreateShuffleVector(
2373           BlockInMaskPart, Undefs, createReplicatedMask(InterleaveFactor, VF),
2374           "interleaved.mask");
2375       NewStoreInstr = Builder.CreateMaskedStore(
2376           IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
2377     }
2378     else
2379       NewStoreInstr =
2380           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2381 
2382     Group->addMetadata(NewStoreInstr);
2383   }
2384 }
2385 
2386 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
2387                                                      VPTransformState &State,
2388                                                      VPValue *Addr,
2389                                                      VPValue *StoredValue,
2390                                                      VPValue *BlockInMask) {
2391   // Attempt to issue a wide load.
2392   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2393   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2394 
2395   assert((LI || SI) && "Invalid Load/Store instruction");
2396   assert((!SI || StoredValue) && "No stored value provided for widened store");
2397   assert((!LI || !StoredValue) && "Stored value provided for widened load");
2398 
2399   LoopVectorizationCostModel::InstWidening Decision =
2400       Cost->getWideningDecision(Instr, VF);
2401   assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2402           Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2403           Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2404          "CM decision is not to widen the memory instruction");
2405 
2406   Type *ScalarDataTy = getMemInstValueType(Instr);
2407   auto *DataTy = FixedVectorType::get(ScalarDataTy, VF);
2408   const Align Alignment = getLoadStoreAlignment(Instr);
2409 
2410   // Determine if the pointer operand of the access is either consecutive or
2411   // reverse consecutive.
2412   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2413   bool ConsecutiveStride =
2414       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2415   bool CreateGatherScatter =
2416       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2417 
2418   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2419   // gather/scatter. Otherwise Decision should have been to Scalarize.
2420   assert((ConsecutiveStride || CreateGatherScatter) &&
2421          "The instruction should be scalarized");
2422   (void)ConsecutiveStride;
2423 
2424   VectorParts BlockInMaskParts(UF);
2425   bool isMaskRequired = BlockInMask;
2426   if (isMaskRequired)
2427     for (unsigned Part = 0; Part < UF; ++Part)
2428       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2429 
2430   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2431     // Calculate the pointer for the specific unroll-part.
2432     GetElementPtrInst *PartPtr = nullptr;
2433 
2434     bool InBounds = false;
2435     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2436       InBounds = gep->isInBounds();
2437 
2438     if (Reverse) {
2439       // If the address is consecutive but reversed, then the
2440       // wide store needs to start at the last vector element.
2441       PartPtr = cast<GetElementPtrInst>(
2442           Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(-Part * VF)));
2443       PartPtr->setIsInBounds(InBounds);
2444       PartPtr = cast<GetElementPtrInst>(
2445           Builder.CreateGEP(ScalarDataTy, PartPtr, Builder.getInt32(1 - VF)));
2446       PartPtr->setIsInBounds(InBounds);
2447       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2448         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2449     } else {
2450       PartPtr = cast<GetElementPtrInst>(
2451           Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(Part * VF)));
2452       PartPtr->setIsInBounds(InBounds);
2453     }
2454 
2455     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2456     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2457   };
2458 
2459   // Handle Stores:
2460   if (SI) {
2461     setDebugLocFromInst(Builder, SI);
2462 
2463     for (unsigned Part = 0; Part < UF; ++Part) {
2464       Instruction *NewSI = nullptr;
2465       Value *StoredVal = State.get(StoredValue, Part);
2466       if (CreateGatherScatter) {
2467         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2468         Value *VectorGep = State.get(Addr, Part);
2469         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2470                                             MaskPart);
2471       } else {
2472         if (Reverse) {
2473           // If we store to reverse consecutive memory locations, then we need
2474           // to reverse the order of elements in the stored value.
2475           StoredVal = reverseVector(StoredVal);
2476           // We don't want to update the value in the map as it might be used in
2477           // another expression. So don't call resetVectorValue(StoredVal).
2478         }
2479         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
2480         if (isMaskRequired)
2481           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2482                                             BlockInMaskParts[Part]);
2483         else
2484           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2485       }
2486       addMetadata(NewSI, SI);
2487     }
2488     return;
2489   }
2490 
2491   // Handle loads.
2492   assert(LI && "Must have a load instruction");
2493   setDebugLocFromInst(Builder, LI);
2494   for (unsigned Part = 0; Part < UF; ++Part) {
2495     Value *NewLI;
2496     if (CreateGatherScatter) {
2497       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2498       Value *VectorGep = State.get(Addr, Part);
2499       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2500                                          nullptr, "wide.masked.gather");
2501       addMetadata(NewLI, LI);
2502     } else {
2503       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
2504       if (isMaskRequired)
2505         NewLI = Builder.CreateMaskedLoad(
2506             VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy),
2507             "wide.masked.load");
2508       else
2509         NewLI =
2510             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
2511 
2512       // Add metadata to the load, but setVectorValue to the reverse shuffle.
2513       addMetadata(NewLI, LI);
2514       if (Reverse)
2515         NewLI = reverseVector(NewLI);
2516     }
2517     VectorLoopValueMap.setVectorValue(Instr, Part, NewLI);
2518   }
2519 }
2520 
2521 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPUser &User,
2522                                                const VPIteration &Instance,
2523                                                bool IfPredicateInstr,
2524                                                VPTransformState &State) {
2525   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2526 
2527   setDebugLocFromInst(Builder, Instr);
2528 
2529   // Does this instruction return a value ?
2530   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2531 
2532   Instruction *Cloned = Instr->clone();
2533   if (!IsVoidRetTy)
2534     Cloned->setName(Instr->getName() + ".cloned");
2535 
2536   // Replace the operands of the cloned instructions with their scalar
2537   // equivalents in the new loop.
2538   for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
2539     auto *NewOp = State.get(User.getOperand(op), Instance);
2540     Cloned->setOperand(op, NewOp);
2541   }
2542   addNewMetadata(Cloned, Instr);
2543 
2544   // Place the cloned scalar in the new loop.
2545   Builder.Insert(Cloned);
2546 
2547   // Add the cloned scalar to the scalar map entry.
2548   VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
2549 
2550   // If we just cloned a new assumption, add it the assumption cache.
2551   if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2552     if (II->getIntrinsicID() == Intrinsic::assume)
2553       AC->registerAssumption(II);
2554 
2555   // End if-block.
2556   if (IfPredicateInstr)
2557     PredicatedInstructions.push_back(Cloned);
2558 }
2559 
2560 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2561                                                       Value *End, Value *Step,
2562                                                       Instruction *DL) {
2563   BasicBlock *Header = L->getHeader();
2564   BasicBlock *Latch = L->getLoopLatch();
2565   // As we're just creating this loop, it's possible no latch exists
2566   // yet. If so, use the header as this will be a single block loop.
2567   if (!Latch)
2568     Latch = Header;
2569 
2570   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2571   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
2572   setDebugLocFromInst(Builder, OldInst);
2573   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2574 
2575   Builder.SetInsertPoint(Latch->getTerminator());
2576   setDebugLocFromInst(Builder, OldInst);
2577 
2578   // Create i+1 and fill the PHINode.
2579   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2580   Induction->addIncoming(Start, L->getLoopPreheader());
2581   Induction->addIncoming(Next, Latch);
2582   // Create the compare.
2583   Value *ICmp = Builder.CreateICmpEQ(Next, End);
2584   Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
2585 
2586   // Now we have two terminators. Remove the old one from the block.
2587   Latch->getTerminator()->eraseFromParent();
2588 
2589   return Induction;
2590 }
2591 
2592 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2593   if (TripCount)
2594     return TripCount;
2595 
2596   assert(L && "Create Trip Count for null loop.");
2597   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2598   // Find the loop boundaries.
2599   ScalarEvolution *SE = PSE.getSE();
2600   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2601   assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
2602          "Invalid loop count");
2603 
2604   Type *IdxTy = Legal->getWidestInductionType();
2605   assert(IdxTy && "No type for induction");
2606 
2607   // The exit count might have the type of i64 while the phi is i32. This can
2608   // happen if we have an induction variable that is sign extended before the
2609   // compare. The only way that we get a backedge taken count is that the
2610   // induction variable was signed and as such will not overflow. In such a case
2611   // truncation is legal.
2612   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
2613       IdxTy->getPrimitiveSizeInBits())
2614     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2615   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2616 
2617   // Get the total trip count from the count by adding 1.
2618   const SCEV *ExitCount = SE->getAddExpr(
2619       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2620 
2621   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2622 
2623   // Expand the trip count and place the new instructions in the preheader.
2624   // Notice that the pre-header does not change, only the loop body.
2625   SCEVExpander Exp(*SE, DL, "induction");
2626 
2627   // Count holds the overall loop count (N).
2628   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2629                                 L->getLoopPreheader()->getTerminator());
2630 
2631   if (TripCount->getType()->isPointerTy())
2632     TripCount =
2633         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2634                                     L->getLoopPreheader()->getTerminator());
2635 
2636   return TripCount;
2637 }
2638 
2639 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
2640   if (VectorTripCount)
2641     return VectorTripCount;
2642 
2643   Value *TC = getOrCreateTripCount(L);
2644   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2645 
2646   Type *Ty = TC->getType();
2647   Constant *Step = ConstantInt::get(Ty, VF * UF);
2648 
2649   // If the tail is to be folded by masking, round the number of iterations N
2650   // up to a multiple of Step instead of rounding down. This is done by first
2651   // adding Step-1 and then rounding down. Note that it's ok if this addition
2652   // overflows: the vector induction variable will eventually wrap to zero given
2653   // that it starts at zero and its Step is a power of two; the loop will then
2654   // exit, with the last early-exit vector comparison also producing all-true.
2655   if (Cost->foldTailByMasking()) {
2656     assert(isPowerOf2_32(VF * UF) &&
2657            "VF*UF must be a power of 2 when folding tail by masking");
2658     TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up");
2659   }
2660 
2661   // Now we need to generate the expression for the part of the loop that the
2662   // vectorized body will execute. This is equal to N - (N % Step) if scalar
2663   // iterations are not required for correctness, or N - Step, otherwise. Step
2664   // is equal to the vectorization factor (number of SIMD elements) times the
2665   // unroll factor (number of SIMD instructions).
2666   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2667 
2668   // If there is a non-reversed interleaved group that may speculatively access
2669   // memory out-of-bounds, we need to ensure that there will be at least one
2670   // iteration of the scalar epilogue loop. Thus, if the step evenly divides
2671   // the trip count, we set the remainder to be equal to the step. If the step
2672   // does not evenly divide the trip count, no adjustment is necessary since
2673   // there will already be scalar iterations. Note that the minimum iterations
2674   // check ensures that N >= Step.
2675   if (VF > 1 && Cost->requiresScalarEpilogue()) {
2676     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2677     R = Builder.CreateSelect(IsZero, Step, R);
2678   }
2679 
2680   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2681 
2682   return VectorTripCount;
2683 }
2684 
2685 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2686                                                    const DataLayout &DL) {
2687   // Verify that V is a vector type with same number of elements as DstVTy.
2688   unsigned VF = DstVTy->getNumElements();
2689   VectorType *SrcVecTy = cast<VectorType>(V->getType());
2690   assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
2691   Type *SrcElemTy = SrcVecTy->getElementType();
2692   Type *DstElemTy = DstVTy->getElementType();
2693   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2694          "Vector elements must have same size");
2695 
2696   // Do a direct cast if element types are castable.
2697   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2698     return Builder.CreateBitOrPointerCast(V, DstVTy);
2699   }
2700   // V cannot be directly casted to desired vector type.
2701   // May happen when V is a floating point vector but DstVTy is a vector of
2702   // pointers or vice-versa. Handle this using a two-step bitcast using an
2703   // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
2704   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2705          "Only one type should be a pointer type");
2706   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2707          "Only one type should be a floating point type");
2708   Type *IntTy =
2709       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2710   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
2711   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2712   return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
2713 }
2714 
2715 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2716                                                          BasicBlock *Bypass) {
2717   Value *Count = getOrCreateTripCount(L);
2718   // Reuse existing vector loop preheader for TC checks.
2719   // Note that new preheader block is generated for vector loop.
2720   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
2721   IRBuilder<> Builder(TCCheckBlock->getTerminator());
2722 
2723   // Generate code to check if the loop's trip count is less than VF * UF, or
2724   // equal to it in case a scalar epilogue is required; this implies that the
2725   // vector trip count is zero. This check also covers the case where adding one
2726   // to the backedge-taken count overflowed leading to an incorrect trip count
2727   // of zero. In this case we will also jump to the scalar loop.
2728   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
2729                                           : ICmpInst::ICMP_ULT;
2730 
2731   // If tail is to be folded, vector loop takes care of all iterations.
2732   Value *CheckMinIters = Builder.getFalse();
2733   if (!Cost->foldTailByMasking())
2734     CheckMinIters = Builder.CreateICmp(
2735         P, Count, ConstantInt::get(Count->getType(), VF * UF),
2736         "min.iters.check");
2737 
2738   // Create new preheader for vector loop.
2739   LoopVectorPreHeader =
2740       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
2741                  "vector.ph");
2742 
2743   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
2744                                DT->getNode(Bypass)->getIDom()) &&
2745          "TC check is expected to dominate Bypass");
2746 
2747   // Update dominator for Bypass & LoopExit.
2748   DT->changeImmediateDominator(Bypass, TCCheckBlock);
2749   DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
2750 
2751   ReplaceInstWithInst(
2752       TCCheckBlock->getTerminator(),
2753       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
2754   LoopBypassBlocks.push_back(TCCheckBlock);
2755 }
2756 
2757 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
2758   // Reuse existing vector loop preheader for SCEV checks.
2759   // Note that new preheader block is generated for vector loop.
2760   BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader;
2761 
2762   // Generate the code to check that the SCEV assumptions that we made.
2763   // We want the new basic block to start at the first instruction in a
2764   // sequence of instructions that form a check.
2765   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
2766                    "scev.check");
2767   Value *SCEVCheck = Exp.expandCodeForPredicate(
2768       &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator());
2769 
2770   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
2771     if (C->isZero())
2772       return;
2773 
2774   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
2775            OptForSizeBasedOnProfile) &&
2776          "Cannot SCEV check stride or overflow when optimizing for size");
2777 
2778   SCEVCheckBlock->setName("vector.scevcheck");
2779   // Create new preheader for vector loop.
2780   LoopVectorPreHeader =
2781       SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI,
2782                  nullptr, "vector.ph");
2783 
2784   // Update dominator only if this is first RT check.
2785   if (LoopBypassBlocks.empty()) {
2786     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
2787     DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
2788   }
2789 
2790   ReplaceInstWithInst(
2791       SCEVCheckBlock->getTerminator(),
2792       BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck));
2793   LoopBypassBlocks.push_back(SCEVCheckBlock);
2794   AddedSafetyChecks = true;
2795 }
2796 
2797 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
2798   // VPlan-native path does not do any analysis for runtime checks currently.
2799   if (EnableVPlanNativePath)
2800     return;
2801 
2802   // Reuse existing vector loop preheader for runtime memory checks.
2803   // Note that new preheader block is generated for vector loop.
2804   BasicBlock *const MemCheckBlock = L->getLoopPreheader();
2805 
2806   // Generate the code that checks in runtime if arrays overlap. We put the
2807   // checks into a separate block to make the more common case of few elements
2808   // faster.
2809   auto *LAI = Legal->getLAI();
2810   const auto &RtPtrChecking = *LAI->getRuntimePointerChecking();
2811   if (!RtPtrChecking.Need)
2812     return;
2813   Instruction *FirstCheckInst;
2814   Instruction *MemRuntimeCheck;
2815   std::tie(FirstCheckInst, MemRuntimeCheck) =
2816       addRuntimeChecks(MemCheckBlock->getTerminator(), OrigLoop,
2817                        RtPtrChecking.getChecks(), RtPtrChecking.getSE());
2818   assert(MemRuntimeCheck && "no RT checks generated although RtPtrChecking "
2819                             "claimed checks are required");
2820 
2821   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
2822     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
2823            "Cannot emit memory checks when optimizing for size, unless forced "
2824            "to vectorize.");
2825     ORE->emit([&]() {
2826       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
2827                                         L->getStartLoc(), L->getHeader())
2828              << "Code-size may be reduced by not forcing "
2829                 "vectorization, or by source-code modifications "
2830                 "eliminating the need for runtime checks "
2831                 "(e.g., adding 'restrict').";
2832     });
2833   }
2834 
2835   MemCheckBlock->setName("vector.memcheck");
2836   // Create new preheader for vector loop.
2837   LoopVectorPreHeader =
2838       SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr,
2839                  "vector.ph");
2840 
2841   // Update dominator only if this is first RT check.
2842   if (LoopBypassBlocks.empty()) {
2843     DT->changeImmediateDominator(Bypass, MemCheckBlock);
2844     DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock);
2845   }
2846 
2847   ReplaceInstWithInst(
2848       MemCheckBlock->getTerminator(),
2849       BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheck));
2850   LoopBypassBlocks.push_back(MemCheckBlock);
2851   AddedSafetyChecks = true;
2852 
2853   // We currently don't use LoopVersioning for the actual loop cloning but we
2854   // still use it to add the noalias metadata.
2855   LVer = std::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
2856                                           PSE.getSE());
2857   LVer->prepareNoAliasMetadata();
2858 }
2859 
2860 Value *InnerLoopVectorizer::emitTransformedIndex(
2861     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
2862     const InductionDescriptor &ID) const {
2863 
2864   SCEVExpander Exp(*SE, DL, "induction");
2865   auto Step = ID.getStep();
2866   auto StartValue = ID.getStartValue();
2867   assert(Index->getType() == Step->getType() &&
2868          "Index type does not match StepValue type");
2869 
2870   // Note: the IR at this point is broken. We cannot use SE to create any new
2871   // SCEV and then expand it, hoping that SCEV's simplification will give us
2872   // a more optimal code. Unfortunately, attempt of doing so on invalid IR may
2873   // lead to various SCEV crashes. So all we can do is to use builder and rely
2874   // on InstCombine for future simplifications. Here we handle some trivial
2875   // cases only.
2876   auto CreateAdd = [&B](Value *X, Value *Y) {
2877     assert(X->getType() == Y->getType() && "Types don't match!");
2878     if (auto *CX = dyn_cast<ConstantInt>(X))
2879       if (CX->isZero())
2880         return Y;
2881     if (auto *CY = dyn_cast<ConstantInt>(Y))
2882       if (CY->isZero())
2883         return X;
2884     return B.CreateAdd(X, Y);
2885   };
2886 
2887   auto CreateMul = [&B](Value *X, Value *Y) {
2888     assert(X->getType() == Y->getType() && "Types don't match!");
2889     if (auto *CX = dyn_cast<ConstantInt>(X))
2890       if (CX->isOne())
2891         return Y;
2892     if (auto *CY = dyn_cast<ConstantInt>(Y))
2893       if (CY->isOne())
2894         return X;
2895     return B.CreateMul(X, Y);
2896   };
2897 
2898   // Get a suitable insert point for SCEV expansion. For blocks in the vector
2899   // loop, choose the end of the vector loop header (=LoopVectorBody), because
2900   // the DomTree is not kept up-to-date for additional blocks generated in the
2901   // vector loop. By using the header as insertion point, we guarantee that the
2902   // expanded instructions dominate all their uses.
2903   auto GetInsertPoint = [this, &B]() {
2904     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
2905     if (InsertBB != LoopVectorBody &&
2906         LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
2907       return LoopVectorBody->getTerminator();
2908     return &*B.GetInsertPoint();
2909   };
2910   switch (ID.getKind()) {
2911   case InductionDescriptor::IK_IntInduction: {
2912     assert(Index->getType() == StartValue->getType() &&
2913            "Index type does not match StartValue type");
2914     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
2915       return B.CreateSub(StartValue, Index);
2916     auto *Offset = CreateMul(
2917         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
2918     return CreateAdd(StartValue, Offset);
2919   }
2920   case InductionDescriptor::IK_PtrInduction: {
2921     assert(isa<SCEVConstant>(Step) &&
2922            "Expected constant step for pointer induction");
2923     return B.CreateGEP(
2924         StartValue->getType()->getPointerElementType(), StartValue,
2925         CreateMul(Index,
2926                   Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())));
2927   }
2928   case InductionDescriptor::IK_FpInduction: {
2929     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2930     auto InductionBinOp = ID.getInductionBinOp();
2931     assert(InductionBinOp &&
2932            (InductionBinOp->getOpcode() == Instruction::FAdd ||
2933             InductionBinOp->getOpcode() == Instruction::FSub) &&
2934            "Original bin op should be defined for FP induction");
2935 
2936     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
2937 
2938     // Floating point operations had to be 'fast' to enable the induction.
2939     FastMathFlags Flags;
2940     Flags.setFast();
2941 
2942     Value *MulExp = B.CreateFMul(StepValue, Index);
2943     if (isa<Instruction>(MulExp))
2944       // We have to check, the MulExp may be a constant.
2945       cast<Instruction>(MulExp)->setFastMathFlags(Flags);
2946 
2947     Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2948                                "induction");
2949     if (isa<Instruction>(BOp))
2950       cast<Instruction>(BOp)->setFastMathFlags(Flags);
2951 
2952     return BOp;
2953   }
2954   case InductionDescriptor::IK_NoInduction:
2955     return nullptr;
2956   }
2957   llvm_unreachable("invalid enum");
2958 }
2959 
2960 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
2961   /*
2962    In this function we generate a new loop. The new loop will contain
2963    the vectorized instructions while the old loop will continue to run the
2964    scalar remainder.
2965 
2966        [ ] <-- loop iteration number check.
2967     /   |
2968    /    v
2969   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
2970   |  /  |
2971   | /   v
2972   ||   [ ]     <-- vector pre header.
2973   |/    |
2974   |     v
2975   |    [  ] \
2976   |    [  ]_|   <-- vector loop.
2977   |     |
2978   |     v
2979   |   -[ ]   <--- middle-block.
2980   |  /  |
2981   | /   v
2982   -|- >[ ]     <--- new preheader.
2983    |    |
2984    |    v
2985    |   [ ] \
2986    |   [ ]_|   <-- old scalar loop to handle remainder.
2987     \   |
2988      \  v
2989       >[ ]     <-- exit block.
2990    ...
2991    */
2992 
2993   MDNode *OrigLoopID = OrigLoop->getLoopID();
2994 
2995   // Some loops have a single integer induction variable, while other loops
2996   // don't. One example is c++ iterators that often have multiple pointer
2997   // induction variables. In the code below we also support a case where we
2998   // don't have a single induction variable.
2999   //
3000   // We try to obtain an induction variable from the original loop as hard
3001   // as possible. However if we don't find one that:
3002   //   - is an integer
3003   //   - counts from zero, stepping by one
3004   //   - is the size of the widest induction variable type
3005   // then we create a new one.
3006   OldInduction = Legal->getPrimaryInduction();
3007   Type *IdxTy = Legal->getWidestInductionType();
3008 
3009   // Split the single block loop into the two loop structure described above.
3010   LoopScalarBody = OrigLoop->getHeader();
3011   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3012   LoopExitBlock = OrigLoop->getExitBlock();
3013   assert(LoopExitBlock && "Must have an exit block");
3014   assert(LoopVectorPreHeader && "Invalid loop structure");
3015 
3016   LoopMiddleBlock =
3017       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3018                  LI, nullptr, "middle.block");
3019   LoopScalarPreHeader =
3020       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3021                  nullptr, "scalar.ph");
3022   // We intentionally don't let SplitBlock to update LoopInfo since
3023   // LoopVectorBody should belong to another loop than LoopVectorPreHeader.
3024   // LoopVectorBody is explicitly added to the correct place few lines later.
3025   LoopVectorBody =
3026       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3027                  nullptr, nullptr, "vector.body");
3028 
3029   // Update dominator for loop exit.
3030   DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3031 
3032   // Create and register the new vector loop.
3033   Loop *Lp = LI->AllocateLoop();
3034   Loop *ParentLoop = OrigLoop->getParentLoop();
3035 
3036   // Insert the new loop into the loop nest and register the new basic blocks
3037   // before calling any utilities such as SCEV that require valid LoopInfo.
3038   if (ParentLoop) {
3039     ParentLoop->addChildLoop(Lp);
3040   } else {
3041     LI->addTopLevelLoop(Lp);
3042   }
3043   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3044 
3045   // Find the loop boundaries.
3046   Value *Count = getOrCreateTripCount(Lp);
3047 
3048   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3049 
3050   // Now, compare the new count to zero. If it is zero skip the vector loop and
3051   // jump to the scalar loop. This check also covers the case where the
3052   // backedge-taken count is uint##_max: adding one to it will overflow leading
3053   // to an incorrect trip count of zero. In this (rare) case we will also jump
3054   // to the scalar loop.
3055   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3056 
3057   // Generate the code to check any assumptions that we've made for SCEV
3058   // expressions.
3059   emitSCEVChecks(Lp, LoopScalarPreHeader);
3060 
3061   // Generate the code that checks in runtime if arrays overlap. We put the
3062   // checks into a separate block to make the more common case of few elements
3063   // faster.
3064   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3065 
3066   // Generate the induction variable.
3067   // The loop step is equal to the vectorization factor (num of SIMD elements)
3068   // times the unroll factor (num of SIMD instructions).
3069   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3070   Constant *Step = ConstantInt::get(IdxTy, VF * UF);
3071   Induction =
3072       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3073                               getDebugLocFromInstOrOperands(OldInduction));
3074 
3075   // We are going to resume the execution of the scalar loop.
3076   // Go over all of the induction variables that we found and fix the
3077   // PHIs that are left in the scalar version of the loop.
3078   // The starting values of PHI nodes depend on the counter of the last
3079   // iteration in the vectorized loop.
3080   // If we come from a bypass edge then we need to start from the original
3081   // start value.
3082 
3083   // This variable saves the new starting index for the scalar loop. It is used
3084   // to test if there are any tail iterations left once the vector loop has
3085   // completed.
3086   for (auto &InductionEntry : Legal->getInductionVars()) {
3087     PHINode *OrigPhi = InductionEntry.first;
3088     InductionDescriptor II = InductionEntry.second;
3089 
3090     // Create phi nodes to merge from the  backedge-taken check block.
3091     PHINode *BCResumeVal =
3092         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3093                         LoopScalarPreHeader->getTerminator());
3094     // Copy original phi DL over to the new one.
3095     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3096     Value *&EndValue = IVEndValues[OrigPhi];
3097     if (OrigPhi == OldInduction) {
3098       // We know what the end value is.
3099       EndValue = CountRoundDown;
3100     } else {
3101       IRBuilder<> B(Lp->getLoopPreheader()->getTerminator());
3102       Type *StepType = II.getStep()->getType();
3103       Instruction::CastOps CastOp =
3104           CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
3105       Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
3106       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3107       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3108       EndValue->setName("ind.end");
3109     }
3110 
3111     // The new PHI merges the original incoming value, in case of a bypass,
3112     // or the value at the end of the vectorized loop.
3113     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3114 
3115     // Fix the scalar body counter (PHI node).
3116     // The old induction's phi node in the scalar body needs the truncated
3117     // value.
3118     for (BasicBlock *BB : LoopBypassBlocks)
3119       BCResumeVal->addIncoming(II.getStartValue(), BB);
3120     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3121   }
3122 
3123   // We need the OrigLoop (scalar loop part) latch terminator to help
3124   // produce correct debug info for the middle block BB instructions.
3125   // The legality check stage guarantees that the loop will have a single
3126   // latch.
3127   assert(isa<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()) &&
3128          "Scalar loop latch terminator isn't a branch");
3129   BranchInst *ScalarLatchBr =
3130       cast<BranchInst>(OrigLoop->getLoopLatch()->getTerminator());
3131 
3132   // Add a check in the middle block to see if we have completed
3133   // all of the iterations in the first vector loop.
3134   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3135   // If tail is to be folded, we know we don't need to run the remainder.
3136   Value *CmpN = Builder.getTrue();
3137   if (!Cost->foldTailByMasking()) {
3138     CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
3139                            CountRoundDown, "cmp.n",
3140                            LoopMiddleBlock->getTerminator());
3141 
3142     // Here we use the same DebugLoc as the scalar loop latch branch instead
3143     // of the corresponding compare because they may have ended up with
3144     // different line numbers and we want to avoid awkward line stepping while
3145     // debugging. Eg. if the compare has got a line number inside the loop.
3146     cast<Instruction>(CmpN)->setDebugLoc(ScalarLatchBr->getDebugLoc());
3147   }
3148 
3149   BranchInst *BrInst =
3150       BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, CmpN);
3151   BrInst->setDebugLoc(ScalarLatchBr->getDebugLoc());
3152   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3153 
3154   // Get ready to start creating new instructions into the vectorized body.
3155   assert(LoopVectorPreHeader == Lp->getLoopPreheader() &&
3156          "Inconsistent vector loop preheader");
3157   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3158 
3159   Optional<MDNode *> VectorizedLoopID =
3160       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3161                                       LLVMLoopVectorizeFollowupVectorized});
3162   if (VectorizedLoopID.hasValue()) {
3163     Lp->setLoopID(VectorizedLoopID.getValue());
3164 
3165     // Do not setAlreadyVectorized if loop attributes have been defined
3166     // explicitly.
3167     return LoopVectorPreHeader;
3168   }
3169 
3170   // Keep all loop hints from the original loop on the vector loop (we'll
3171   // replace the vectorizer-specific hints below).
3172   if (MDNode *LID = OrigLoop->getLoopID())
3173     Lp->setLoopID(LID);
3174 
3175   LoopVectorizeHints Hints(Lp, true, *ORE);
3176   Hints.setAlreadyVectorized();
3177 
3178 #ifdef EXPENSIVE_CHECKS
3179   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3180   LI->verify(*DT);
3181 #endif
3182 
3183   return LoopVectorPreHeader;
3184 }
3185 
3186 // Fix up external users of the induction variable. At this point, we are
3187 // in LCSSA form, with all external PHIs that use the IV having one input value,
3188 // coming from the remainder loop. We need those PHIs to also have a correct
3189 // value for the IV when arriving directly from the middle block.
3190 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3191                                        const InductionDescriptor &II,
3192                                        Value *CountRoundDown, Value *EndValue,
3193                                        BasicBlock *MiddleBlock) {
3194   // There are two kinds of external IV usages - those that use the value
3195   // computed in the last iteration (the PHI) and those that use the penultimate
3196   // value (the value that feeds into the phi from the loop latch).
3197   // We allow both, but they, obviously, have different values.
3198 
3199   assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3200 
3201   DenseMap<Value *, Value *> MissingVals;
3202 
3203   // An external user of the last iteration's value should see the value that
3204   // the remainder loop uses to initialize its own IV.
3205   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3206   for (User *U : PostInc->users()) {
3207     Instruction *UI = cast<Instruction>(U);
3208     if (!OrigLoop->contains(UI)) {
3209       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3210       MissingVals[UI] = EndValue;
3211     }
3212   }
3213 
3214   // An external user of the penultimate value need to see EndValue - Step.
3215   // The simplest way to get this is to recompute it from the constituent SCEVs,
3216   // that is Start + (Step * (CRD - 1)).
3217   for (User *U : OrigPhi->users()) {
3218     auto *UI = cast<Instruction>(U);
3219     if (!OrigLoop->contains(UI)) {
3220       const DataLayout &DL =
3221           OrigLoop->getHeader()->getModule()->getDataLayout();
3222       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3223 
3224       IRBuilder<> B(MiddleBlock->getTerminator());
3225       Value *CountMinusOne = B.CreateSub(
3226           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3227       Value *CMO =
3228           !II.getStep()->getType()->isIntegerTy()
3229               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3230                              II.getStep()->getType())
3231               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3232       CMO->setName("cast.cmo");
3233       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3234       Escape->setName("ind.escape");
3235       MissingVals[UI] = Escape;
3236     }
3237   }
3238 
3239   for (auto &I : MissingVals) {
3240     PHINode *PHI = cast<PHINode>(I.first);
3241     // One corner case we have to handle is two IVs "chasing" each-other,
3242     // that is %IV2 = phi [...], [ %IV1, %latch ]
3243     // In this case, if IV1 has an external use, we need to avoid adding both
3244     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3245     // don't already have an incoming value for the middle block.
3246     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3247       PHI->addIncoming(I.second, MiddleBlock);
3248   }
3249 }
3250 
3251 namespace {
3252 
3253 struct CSEDenseMapInfo {
3254   static bool canHandle(const Instruction *I) {
3255     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3256            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3257   }
3258 
3259   static inline Instruction *getEmptyKey() {
3260     return DenseMapInfo<Instruction *>::getEmptyKey();
3261   }
3262 
3263   static inline Instruction *getTombstoneKey() {
3264     return DenseMapInfo<Instruction *>::getTombstoneKey();
3265   }
3266 
3267   static unsigned getHashValue(const Instruction *I) {
3268     assert(canHandle(I) && "Unknown instruction!");
3269     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3270                                                            I->value_op_end()));
3271   }
3272 
3273   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3274     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3275         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3276       return LHS == RHS;
3277     return LHS->isIdenticalTo(RHS);
3278   }
3279 };
3280 
3281 } // end anonymous namespace
3282 
3283 ///Perform cse of induction variable instructions.
3284 static void cse(BasicBlock *BB) {
3285   // Perform simple cse.
3286   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3287   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3288     Instruction *In = &*I++;
3289 
3290     if (!CSEDenseMapInfo::canHandle(In))
3291       continue;
3292 
3293     // Check if we can replace this instruction with any of the
3294     // visited instructions.
3295     if (Instruction *V = CSEMap.lookup(In)) {
3296       In->replaceAllUsesWith(V);
3297       In->eraseFromParent();
3298       continue;
3299     }
3300 
3301     CSEMap[In] = In;
3302   }
3303 }
3304 
3305 unsigned LoopVectorizationCostModel::getVectorCallCost(CallInst *CI,
3306                                                        unsigned VF,
3307                                                        bool &NeedToScalarize) {
3308   Function *F = CI->getCalledFunction();
3309   Type *ScalarRetTy = CI->getType();
3310   SmallVector<Type *, 4> Tys, ScalarTys;
3311   for (auto &ArgOp : CI->arg_operands())
3312     ScalarTys.push_back(ArgOp->getType());
3313 
3314   // Estimate cost of scalarized vector call. The source operands are assumed
3315   // to be vectors, so we need to extract individual elements from there,
3316   // execute VF scalar calls, and then gather the result into the vector return
3317   // value.
3318   unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys,
3319                                                  TTI::TCK_RecipThroughput);
3320   if (VF == 1)
3321     return ScalarCallCost;
3322 
3323   // Compute corresponding vector type for return value and arguments.
3324   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3325   for (Type *ScalarTy : ScalarTys)
3326     Tys.push_back(ToVectorTy(ScalarTy, VF));
3327 
3328   // Compute costs of unpacking argument values for the scalar calls and
3329   // packing the return values to a vector.
3330   unsigned ScalarizationCost = getScalarizationOverhead(CI, VF);
3331 
3332   unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
3333 
3334   // If we can't emit a vector call for this function, then the currently found
3335   // cost is the cost we need to return.
3336   NeedToScalarize = true;
3337   VFShape Shape = VFShape::get(*CI, {VF, false}, false /*HasGlobalPred*/);
3338   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3339 
3340   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3341     return Cost;
3342 
3343   // If the corresponding vector cost is cheaper, return its cost.
3344   unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys,
3345                                                  TTI::TCK_RecipThroughput);
3346   if (VectorCallCost < Cost) {
3347     NeedToScalarize = false;
3348     return VectorCallCost;
3349   }
3350   return Cost;
3351 }
3352 
3353 unsigned LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3354                                                             unsigned VF) {
3355   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3356   assert(ID && "Expected intrinsic call!");
3357 
3358   IntrinsicCostAttributes CostAttrs(ID, *CI, VF);
3359   return TTI.getIntrinsicInstrCost(CostAttrs,
3360                                    TargetTransformInfo::TCK_RecipThroughput);
3361 }
3362 
3363 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3364   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3365   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3366   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3367 }
3368 
3369 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3370   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3371   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3372   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3373 }
3374 
3375 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
3376   // For every instruction `I` in MinBWs, truncate the operands, create a
3377   // truncated version of `I` and reextend its result. InstCombine runs
3378   // later and will remove any ext/trunc pairs.
3379   SmallPtrSet<Value *, 4> Erased;
3380   for (const auto &KV : Cost->getMinimalBitwidths()) {
3381     // If the value wasn't vectorized, we must maintain the original scalar
3382     // type. The absence of the value from VectorLoopValueMap indicates that it
3383     // wasn't vectorized.
3384     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3385       continue;
3386     for (unsigned Part = 0; Part < UF; ++Part) {
3387       Value *I = getOrCreateVectorValue(KV.first, Part);
3388       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3389         continue;
3390       Type *OriginalTy = I->getType();
3391       Type *ScalarTruncatedTy =
3392           IntegerType::get(OriginalTy->getContext(), KV.second);
3393       auto *TruncatedTy = FixedVectorType::get(
3394           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getNumElements());
3395       if (TruncatedTy == OriginalTy)
3396         continue;
3397 
3398       IRBuilder<> B(cast<Instruction>(I));
3399       auto ShrinkOperand = [&](Value *V) -> Value * {
3400         if (auto *ZI = dyn_cast<ZExtInst>(V))
3401           if (ZI->getSrcTy() == TruncatedTy)
3402             return ZI->getOperand(0);
3403         return B.CreateZExtOrTrunc(V, TruncatedTy);
3404       };
3405 
3406       // The actual instruction modification depends on the instruction type,
3407       // unfortunately.
3408       Value *NewI = nullptr;
3409       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3410         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3411                              ShrinkOperand(BO->getOperand(1)));
3412 
3413         // Any wrapping introduced by shrinking this operation shouldn't be
3414         // considered undefined behavior. So, we can't unconditionally copy
3415         // arithmetic wrapping flags to NewI.
3416         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3417       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3418         NewI =
3419             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3420                          ShrinkOperand(CI->getOperand(1)));
3421       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3422         NewI = B.CreateSelect(SI->getCondition(),
3423                               ShrinkOperand(SI->getTrueValue()),
3424                               ShrinkOperand(SI->getFalseValue()));
3425       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3426         switch (CI->getOpcode()) {
3427         default:
3428           llvm_unreachable("Unhandled cast!");
3429         case Instruction::Trunc:
3430           NewI = ShrinkOperand(CI->getOperand(0));
3431           break;
3432         case Instruction::SExt:
3433           NewI = B.CreateSExtOrTrunc(
3434               CI->getOperand(0),
3435               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3436           break;
3437         case Instruction::ZExt:
3438           NewI = B.CreateZExtOrTrunc(
3439               CI->getOperand(0),
3440               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3441           break;
3442         }
3443       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3444         auto Elements0 =
3445             cast<VectorType>(SI->getOperand(0)->getType())->getNumElements();
3446         auto *O0 = B.CreateZExtOrTrunc(
3447             SI->getOperand(0),
3448             FixedVectorType::get(ScalarTruncatedTy, Elements0));
3449         auto Elements1 =
3450             cast<VectorType>(SI->getOperand(1)->getType())->getNumElements();
3451         auto *O1 = B.CreateZExtOrTrunc(
3452             SI->getOperand(1),
3453             FixedVectorType::get(ScalarTruncatedTy, Elements1));
3454 
3455         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3456       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3457         // Don't do anything with the operands, just extend the result.
3458         continue;
3459       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3460         auto Elements =
3461             cast<VectorType>(IE->getOperand(0)->getType())->getNumElements();
3462         auto *O0 = B.CreateZExtOrTrunc(
3463             IE->getOperand(0),
3464             FixedVectorType::get(ScalarTruncatedTy, Elements));
3465         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3466         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3467       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3468         auto Elements =
3469             cast<VectorType>(EE->getOperand(0)->getType())->getNumElements();
3470         auto *O0 = B.CreateZExtOrTrunc(
3471             EE->getOperand(0),
3472             FixedVectorType::get(ScalarTruncatedTy, Elements));
3473         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3474       } else {
3475         // If we don't know what to do, be conservative and don't do anything.
3476         continue;
3477       }
3478 
3479       // Lastly, extend the result.
3480       NewI->takeName(cast<Instruction>(I));
3481       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3482       I->replaceAllUsesWith(Res);
3483       cast<Instruction>(I)->eraseFromParent();
3484       Erased.insert(I);
3485       VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
3486     }
3487   }
3488 
3489   // We'll have created a bunch of ZExts that are now parentless. Clean up.
3490   for (const auto &KV : Cost->getMinimalBitwidths()) {
3491     // If the value wasn't vectorized, we must maintain the original scalar
3492     // type. The absence of the value from VectorLoopValueMap indicates that it
3493     // wasn't vectorized.
3494     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3495       continue;
3496     for (unsigned Part = 0; Part < UF; ++Part) {
3497       Value *I = getOrCreateVectorValue(KV.first, Part);
3498       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3499       if (Inst && Inst->use_empty()) {
3500         Value *NewI = Inst->getOperand(0);
3501         Inst->eraseFromParent();
3502         VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3503       }
3504     }
3505   }
3506 }
3507 
3508 void InnerLoopVectorizer::fixVectorizedLoop() {
3509   // Insert truncates and extends for any truncated instructions as hints to
3510   // InstCombine.
3511   if (VF > 1)
3512     truncateToMinimalBitwidths();
3513 
3514   // Fix widened non-induction PHIs by setting up the PHI operands.
3515   if (OrigPHIsToFix.size()) {
3516     assert(EnableVPlanNativePath &&
3517            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3518     fixNonInductionPHIs();
3519   }
3520 
3521   // At this point every instruction in the original loop is widened to a
3522   // vector form. Now we need to fix the recurrences in the loop. These PHI
3523   // nodes are currently empty because we did not want to introduce cycles.
3524   // This is the second stage of vectorizing recurrences.
3525   fixCrossIterationPHIs();
3526 
3527   // Forget the original basic block.
3528   PSE.getSE()->forgetLoop(OrigLoop);
3529 
3530   // Fix-up external users of the induction variables.
3531   for (auto &Entry : Legal->getInductionVars())
3532     fixupIVUsers(Entry.first, Entry.second,
3533                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3534                  IVEndValues[Entry.first], LoopMiddleBlock);
3535 
3536   fixLCSSAPHIs();
3537   for (Instruction *PI : PredicatedInstructions)
3538     sinkScalarOperands(&*PI);
3539 
3540   // Remove redundant induction instructions.
3541   cse(LoopVectorBody);
3542 
3543   // Set/update profile weights for the vector and remainder loops as original
3544   // loop iterations are now distributed among them. Note that original loop
3545   // represented by LoopScalarBody becomes remainder loop after vectorization.
3546   //
3547   // For cases like foldTailByMasking() and requiresScalarEpiloque() we may
3548   // end up getting slightly roughened result but that should be OK since
3549   // profile is not inherently precise anyway. Note also possible bypass of
3550   // vector code caused by legality checks is ignored, assigning all the weight
3551   // to the vector loop, optimistically.
3552   setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody),
3553                                LI->getLoopFor(LoopVectorBody),
3554                                LI->getLoopFor(LoopScalarBody), VF * UF);
3555 }
3556 
3557 void InnerLoopVectorizer::fixCrossIterationPHIs() {
3558   // In order to support recurrences we need to be able to vectorize Phi nodes.
3559   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3560   // stage #2: We now need to fix the recurrences by adding incoming edges to
3561   // the currently empty PHI nodes. At this point every instruction in the
3562   // original loop is widened to a vector form so we can use them to construct
3563   // the incoming edges.
3564   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
3565     // Handle first-order recurrences and reductions that need to be fixed.
3566     if (Legal->isFirstOrderRecurrence(&Phi))
3567       fixFirstOrderRecurrence(&Phi);
3568     else if (Legal->isReductionVariable(&Phi))
3569       fixReduction(&Phi);
3570   }
3571 }
3572 
3573 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3574   // This is the second phase of vectorizing first-order recurrences. An
3575   // overview of the transformation is described below. Suppose we have the
3576   // following loop.
3577   //
3578   //   for (int i = 0; i < n; ++i)
3579   //     b[i] = a[i] - a[i - 1];
3580   //
3581   // There is a first-order recurrence on "a". For this loop, the shorthand
3582   // scalar IR looks like:
3583   //
3584   //   scalar.ph:
3585   //     s_init = a[-1]
3586   //     br scalar.body
3587   //
3588   //   scalar.body:
3589   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3590   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3591   //     s2 = a[i]
3592   //     b[i] = s2 - s1
3593   //     br cond, scalar.body, ...
3594   //
3595   // In this example, s1 is a recurrence because it's value depends on the
3596   // previous iteration. In the first phase of vectorization, we created a
3597   // temporary value for s1. We now complete the vectorization and produce the
3598   // shorthand vector IR shown below (for VF = 4, UF = 1).
3599   //
3600   //   vector.ph:
3601   //     v_init = vector(..., ..., ..., a[-1])
3602   //     br vector.body
3603   //
3604   //   vector.body
3605   //     i = phi [0, vector.ph], [i+4, vector.body]
3606   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3607   //     v2 = a[i, i+1, i+2, i+3];
3608   //     v3 = vector(v1(3), v2(0, 1, 2))
3609   //     b[i, i+1, i+2, i+3] = v2 - v3
3610   //     br cond, vector.body, middle.block
3611   //
3612   //   middle.block:
3613   //     x = v2(3)
3614   //     br scalar.ph
3615   //
3616   //   scalar.ph:
3617   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3618   //     br scalar.body
3619   //
3620   // After execution completes the vector loop, we extract the next value of
3621   // the recurrence (x) to use as the initial value in the scalar loop.
3622 
3623   // Get the original loop preheader and single loop latch.
3624   auto *Preheader = OrigLoop->getLoopPreheader();
3625   auto *Latch = OrigLoop->getLoopLatch();
3626 
3627   // Get the initial and previous values of the scalar recurrence.
3628   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3629   auto *Previous = Phi->getIncomingValueForBlock(Latch);
3630 
3631   // Create a vector from the initial value.
3632   auto *VectorInit = ScalarInit;
3633   if (VF > 1) {
3634     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3635     VectorInit = Builder.CreateInsertElement(
3636         UndefValue::get(FixedVectorType::get(VectorInit->getType(), VF)),
3637         VectorInit, Builder.getInt32(VF - 1), "vector.recur.init");
3638   }
3639 
3640   // We constructed a temporary phi node in the first phase of vectorization.
3641   // This phi node will eventually be deleted.
3642   Builder.SetInsertPoint(
3643       cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
3644 
3645   // Create a phi node for the new recurrence. The current value will either be
3646   // the initial value inserted into a vector or loop-varying vector value.
3647   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
3648   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
3649 
3650   // Get the vectorized previous value of the last part UF - 1. It appears last
3651   // among all unrolled iterations, due to the order of their construction.
3652   Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
3653 
3654   // Find and set the insertion point after the previous value if it is an
3655   // instruction.
3656   BasicBlock::iterator InsertPt;
3657   // Note that the previous value may have been constant-folded so it is not
3658   // guaranteed to be an instruction in the vector loop.
3659   // FIXME: Loop invariant values do not form recurrences. We should deal with
3660   //        them earlier.
3661   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
3662     InsertPt = LoopVectorBody->getFirstInsertionPt();
3663   else {
3664     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
3665     if (isa<PHINode>(PreviousLastPart))
3666       // If the previous value is a phi node, we should insert after all the phi
3667       // nodes in the block containing the PHI to avoid breaking basic block
3668       // verification. Note that the basic block may be different to
3669       // LoopVectorBody, in case we predicate the loop.
3670       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
3671     else
3672       InsertPt = ++PreviousInst->getIterator();
3673   }
3674   Builder.SetInsertPoint(&*InsertPt);
3675 
3676   // We will construct a vector for the recurrence by combining the values for
3677   // the current and previous iterations. This is the required shuffle mask.
3678   SmallVector<int, 8> ShuffleMask(VF);
3679   ShuffleMask[0] = VF - 1;
3680   for (unsigned I = 1; I < VF; ++I)
3681     ShuffleMask[I] = I + VF - 1;
3682 
3683   // The vector from which to take the initial value for the current iteration
3684   // (actual or unrolled). Initially, this is the vector phi node.
3685   Value *Incoming = VecPhi;
3686 
3687   // Shuffle the current and previous vector and update the vector parts.
3688   for (unsigned Part = 0; Part < UF; ++Part) {
3689     Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
3690     Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
3691     auto *Shuffle = VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart,
3692                                                          ShuffleMask)
3693                            : Incoming;
3694     PhiPart->replaceAllUsesWith(Shuffle);
3695     cast<Instruction>(PhiPart)->eraseFromParent();
3696     VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
3697     Incoming = PreviousPart;
3698   }
3699 
3700   // Fix the latch value of the new recurrence in the vector loop.
3701   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3702 
3703   // Extract the last vector element in the middle block. This will be the
3704   // initial value for the recurrence when jumping to the scalar loop.
3705   auto *ExtractForScalar = Incoming;
3706   if (VF > 1) {
3707     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3708     ExtractForScalar = Builder.CreateExtractElement(
3709         ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
3710   }
3711   // Extract the second last element in the middle block if the
3712   // Phi is used outside the loop. We need to extract the phi itself
3713   // and not the last element (the phi update in the current iteration). This
3714   // will be the value when jumping to the exit block from the LoopMiddleBlock,
3715   // when the scalar loop is not run at all.
3716   Value *ExtractForPhiUsedOutsideLoop = nullptr;
3717   if (VF > 1)
3718     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3719         Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
3720   // When loop is unrolled without vectorizing, initialize
3721   // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value of
3722   // `Incoming`. This is analogous to the vectorized case above: extracting the
3723   // second last element when VF > 1.
3724   else if (UF > 1)
3725     ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
3726 
3727   // Fix the initial value of the original recurrence in the scalar loop.
3728   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3729   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3730   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3731     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3732     Start->addIncoming(Incoming, BB);
3733   }
3734 
3735   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3736   Phi->setName("scalar.recur");
3737 
3738   // Finally, fix users of the recurrence outside the loop. The users will need
3739   // either the last value of the scalar recurrence or the last value of the
3740   // vector recurrence we extracted in the middle block. Since the loop is in
3741   // LCSSA form, we just need to find all the phi nodes for the original scalar
3742   // recurrence in the exit block, and then add an edge for the middle block.
3743   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3744     if (LCSSAPhi.getIncomingValue(0) == Phi) {
3745       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3746     }
3747   }
3748 }
3749 
3750 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
3751   Constant *Zero = Builder.getInt32(0);
3752 
3753   // Get it's reduction variable descriptor.
3754   assert(Legal->isReductionVariable(Phi) &&
3755          "Unable to find the reduction variable");
3756   RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
3757 
3758   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3759   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3760   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3761   RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3762     RdxDesc.getMinMaxRecurrenceKind();
3763   setDebugLocFromInst(Builder, ReductionStartValue);
3764 
3765   // We need to generate a reduction vector from the incoming scalar.
3766   // To do so, we need to generate the 'identity' vector and override
3767   // one of the elements with the incoming scalar reduction. We need
3768   // to do it in the vector-loop preheader.
3769   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3770 
3771   // This is the vector-clone of the value that leaves the loop.
3772   Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
3773 
3774   // Find the reduction identity variable. Zero for addition, or, xor,
3775   // one for multiplication, -1 for And.
3776   Value *Identity;
3777   Value *VectorStart;
3778   if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3779       RK == RecurrenceDescriptor::RK_FloatMinMax) {
3780     // MinMax reduction have the start value as their identify.
3781     if (VF == 1) {
3782       VectorStart = Identity = ReductionStartValue;
3783     } else {
3784       VectorStart = Identity =
3785         Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3786     }
3787   } else {
3788     // Handle other reduction kinds:
3789     Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3790         RK, VecTy->getScalarType());
3791     if (VF == 1) {
3792       Identity = Iden;
3793       // This vector is the Identity vector where the first element is the
3794       // incoming scalar reduction.
3795       VectorStart = ReductionStartValue;
3796     } else {
3797       Identity = ConstantVector::getSplat({VF, false}, Iden);
3798 
3799       // This vector is the Identity vector where the first element is the
3800       // incoming scalar reduction.
3801       VectorStart =
3802         Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3803     }
3804   }
3805 
3806   // Wrap flags are in general invalid after vectorization, clear them.
3807   clearReductionWrapFlags(RdxDesc);
3808 
3809   // Fix the vector-loop phi.
3810 
3811   // Reductions do not have to start at zero. They can start with
3812   // any loop invariant values.
3813   BasicBlock *Latch = OrigLoop->getLoopLatch();
3814   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3815 
3816   for (unsigned Part = 0; Part < UF; ++Part) {
3817     Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
3818     Value *Val = getOrCreateVectorValue(LoopVal, Part);
3819     // Make sure to add the reduction start value only to the
3820     // first unroll part.
3821     Value *StartVal = (Part == 0) ? VectorStart : Identity;
3822     cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
3823     cast<PHINode>(VecRdxPhi)
3824       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3825   }
3826 
3827   // Before each round, move the insertion point right between
3828   // the PHIs and the values we are going to write.
3829   // This allows us to write both PHINodes and the extractelement
3830   // instructions.
3831   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3832 
3833   setDebugLocFromInst(Builder, LoopExitInst);
3834 
3835   // If tail is folded by masking, the vector value to leave the loop should be
3836   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
3837   // instead of the former.
3838   if (Cost->foldTailByMasking()) {
3839     for (unsigned Part = 0; Part < UF; ++Part) {
3840       Value *VecLoopExitInst =
3841           VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3842       Value *Sel = nullptr;
3843       for (User *U : VecLoopExitInst->users()) {
3844         if (isa<SelectInst>(U)) {
3845           assert(!Sel && "Reduction exit feeding two selects");
3846           Sel = U;
3847         } else
3848           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
3849       }
3850       assert(Sel && "Reduction exit feeds no select");
3851       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel);
3852     }
3853   }
3854 
3855   // If the vector reduction can be performed in a smaller type, we truncate
3856   // then extend the loop exit value to enable InstCombine to evaluate the
3857   // entire expression in the smaller type.
3858   if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
3859     Type *RdxVecTy = FixedVectorType::get(RdxDesc.getRecurrenceType(), VF);
3860     Builder.SetInsertPoint(
3861         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
3862     VectorParts RdxParts(UF);
3863     for (unsigned Part = 0; Part < UF; ++Part) {
3864       RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3865       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3866       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3867                                         : Builder.CreateZExt(Trunc, VecTy);
3868       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
3869            UI != RdxParts[Part]->user_end();)
3870         if (*UI != Trunc) {
3871           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
3872           RdxParts[Part] = Extnd;
3873         } else {
3874           ++UI;
3875         }
3876     }
3877     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3878     for (unsigned Part = 0; Part < UF; ++Part) {
3879       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3880       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
3881     }
3882   }
3883 
3884   // Reduce all of the unrolled parts into a single vector.
3885   Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
3886   unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
3887 
3888   // The middle block terminator has already been assigned a DebugLoc here (the
3889   // OrigLoop's single latch terminator). We want the whole middle block to
3890   // appear to execute on this line because: (a) it is all compiler generated,
3891   // (b) these instructions are always executed after evaluating the latch
3892   // conditional branch, and (c) other passes may add new predecessors which
3893   // terminate on this line. This is the easiest way to ensure we don't
3894   // accidentally cause an extra step back into the loop while debugging.
3895   setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
3896   for (unsigned Part = 1; Part < UF; ++Part) {
3897     Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3898     if (Op != Instruction::ICmp && Op != Instruction::FCmp)
3899       // Floating point operations had to be 'fast' to enable the reduction.
3900       ReducedPartRdx = addFastMathFlag(
3901           Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart,
3902                               ReducedPartRdx, "bin.rdx"),
3903           RdxDesc.getFastMathFlags());
3904     else
3905       ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx,
3906                                       RdxPart);
3907   }
3908 
3909   if (VF > 1) {
3910     bool NoNaN = Legal->hasFunNoNaNAttr();
3911     ReducedPartRdx =
3912         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN);
3913     // If the reduction can be performed in a smaller type, we need to extend
3914     // the reduction to the wider type before we branch to the original loop.
3915     if (Phi->getType() != RdxDesc.getRecurrenceType())
3916       ReducedPartRdx =
3917         RdxDesc.isSigned()
3918         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
3919         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
3920   }
3921 
3922   // Create a phi node that merges control-flow from the backedge-taken check
3923   // block and the middle block.
3924   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
3925                                         LoopScalarPreHeader->getTerminator());
3926   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
3927     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
3928   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
3929 
3930   // Now, we need to fix the users of the reduction variable
3931   // inside and outside of the scalar remainder loop.
3932   // We know that the loop is in LCSSA form. We need to update the
3933   // PHI nodes in the exit blocks.
3934   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3935     // All PHINodes need to have a single entry edge, or two if
3936     // we already fixed them.
3937     assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
3938 
3939     // We found a reduction value exit-PHI. Update it with the
3940     // incoming bypass edge.
3941     if (LCSSAPhi.getIncomingValue(0) == LoopExitInst)
3942       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
3943   } // end of the LCSSA phi scan.
3944 
3945     // Fix the scalar loop reduction variable with the incoming reduction sum
3946     // from the vector body and from the backedge value.
3947   int IncomingEdgeBlockIdx =
3948     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
3949   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
3950   // Pick the other block.
3951   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
3952   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
3953   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
3954 }
3955 
3956 void InnerLoopVectorizer::clearReductionWrapFlags(
3957     RecurrenceDescriptor &RdxDesc) {
3958   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3959   if (RK != RecurrenceDescriptor::RK_IntegerAdd &&
3960       RK != RecurrenceDescriptor::RK_IntegerMult)
3961     return;
3962 
3963   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
3964   assert(LoopExitInstr && "null loop exit instruction");
3965   SmallVector<Instruction *, 8> Worklist;
3966   SmallPtrSet<Instruction *, 8> Visited;
3967   Worklist.push_back(LoopExitInstr);
3968   Visited.insert(LoopExitInstr);
3969 
3970   while (!Worklist.empty()) {
3971     Instruction *Cur = Worklist.pop_back_val();
3972     if (isa<OverflowingBinaryOperator>(Cur))
3973       for (unsigned Part = 0; Part < UF; ++Part) {
3974         Value *V = getOrCreateVectorValue(Cur, Part);
3975         cast<Instruction>(V)->dropPoisonGeneratingFlags();
3976       }
3977 
3978     for (User *U : Cur->users()) {
3979       Instruction *UI = cast<Instruction>(U);
3980       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
3981           Visited.insert(UI).second)
3982         Worklist.push_back(UI);
3983     }
3984   }
3985 }
3986 
3987 void InnerLoopVectorizer::fixLCSSAPHIs() {
3988   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3989     if (LCSSAPhi.getNumIncomingValues() == 1) {
3990       auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
3991       // Non-instruction incoming values will have only one value.
3992       unsigned LastLane = 0;
3993       if (isa<Instruction>(IncomingValue))
3994           LastLane = Cost->isUniformAfterVectorization(
3995                          cast<Instruction>(IncomingValue), VF)
3996                          ? 0
3997                          : VF - 1;
3998       // Can be a loop invariant incoming value or the last scalar value to be
3999       // extracted from the vectorized loop.
4000       Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4001       Value *lastIncomingValue =
4002           getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane });
4003       LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4004     }
4005   }
4006 }
4007 
4008 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4009   // The basic block and loop containing the predicated instruction.
4010   auto *PredBB = PredInst->getParent();
4011   auto *VectorLoop = LI->getLoopFor(PredBB);
4012 
4013   // Initialize a worklist with the operands of the predicated instruction.
4014   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4015 
4016   // Holds instructions that we need to analyze again. An instruction may be
4017   // reanalyzed if we don't yet know if we can sink it or not.
4018   SmallVector<Instruction *, 8> InstsToReanalyze;
4019 
4020   // Returns true if a given use occurs in the predicated block. Phi nodes use
4021   // their operands in their corresponding predecessor blocks.
4022   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4023     auto *I = cast<Instruction>(U.getUser());
4024     BasicBlock *BB = I->getParent();
4025     if (auto *Phi = dyn_cast<PHINode>(I))
4026       BB = Phi->getIncomingBlock(
4027           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4028     return BB == PredBB;
4029   };
4030 
4031   // Iteratively sink the scalarized operands of the predicated instruction
4032   // into the block we created for it. When an instruction is sunk, it's
4033   // operands are then added to the worklist. The algorithm ends after one pass
4034   // through the worklist doesn't sink a single instruction.
4035   bool Changed;
4036   do {
4037     // Add the instructions that need to be reanalyzed to the worklist, and
4038     // reset the changed indicator.
4039     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4040     InstsToReanalyze.clear();
4041     Changed = false;
4042 
4043     while (!Worklist.empty()) {
4044       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4045 
4046       // We can't sink an instruction if it is a phi node, is already in the
4047       // predicated block, is not in the loop, or may have side effects.
4048       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4049           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4050         continue;
4051 
4052       // It's legal to sink the instruction if all its uses occur in the
4053       // predicated block. Otherwise, there's nothing to do yet, and we may
4054       // need to reanalyze the instruction.
4055       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4056         InstsToReanalyze.push_back(I);
4057         continue;
4058       }
4059 
4060       // Move the instruction to the beginning of the predicated block, and add
4061       // it's operands to the worklist.
4062       I->moveBefore(&*PredBB->getFirstInsertionPt());
4063       Worklist.insert(I->op_begin(), I->op_end());
4064 
4065       // The sinking may have enabled other instructions to be sunk, so we will
4066       // need to iterate.
4067       Changed = true;
4068     }
4069   } while (Changed);
4070 }
4071 
4072 void InnerLoopVectorizer::fixNonInductionPHIs() {
4073   for (PHINode *OrigPhi : OrigPHIsToFix) {
4074     PHINode *NewPhi =
4075         cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0));
4076     unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();
4077 
4078     SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
4079         predecessors(OrigPhi->getParent()));
4080     SmallVector<BasicBlock *, 2> VectorBBPredecessors(
4081         predecessors(NewPhi->getParent()));
4082     assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
4083            "Scalar and Vector BB should have the same number of predecessors");
4084 
4085     // The insertion point in Builder may be invalidated by the time we get
4086     // here. Force the Builder insertion point to something valid so that we do
4087     // not run into issues during insertion point restore in
4088     // getOrCreateVectorValue calls below.
4089     Builder.SetInsertPoint(NewPhi);
4090 
4091     // The predecessor order is preserved and we can rely on mapping between
4092     // scalar and vector block predecessors.
4093     for (unsigned i = 0; i < NumIncomingValues; ++i) {
4094       BasicBlock *NewPredBB = VectorBBPredecessors[i];
4095 
4096       // When looking up the new scalar/vector values to fix up, use incoming
4097       // values from original phi.
4098       Value *ScIncV =
4099           OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]);
4100 
4101       // Scalar incoming value may need a broadcast
4102       Value *NewIncV = getOrCreateVectorValue(ScIncV, 0);
4103       NewPhi->addIncoming(NewIncV, NewPredBB);
4104     }
4105   }
4106 }
4107 
4108 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPUser &Operands,
4109                                    unsigned UF, unsigned VF,
4110                                    bool IsPtrLoopInvariant,
4111                                    SmallBitVector &IsIndexLoopInvariant,
4112                                    VPTransformState &State) {
4113   // Construct a vector GEP by widening the operands of the scalar GEP as
4114   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4115   // results in a vector of pointers when at least one operand of the GEP
4116   // is vector-typed. Thus, to keep the representation compact, we only use
4117   // vector-typed operands for loop-varying values.
4118 
4119   if (VF > 1 && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4120     // If we are vectorizing, but the GEP has only loop-invariant operands,
4121     // the GEP we build (by only using vector-typed operands for
4122     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4123     // produce a vector of pointers, we need to either arbitrarily pick an
4124     // operand to broadcast, or broadcast a clone of the original GEP.
4125     // Here, we broadcast a clone of the original.
4126     //
4127     // TODO: If at some point we decide to scalarize instructions having
4128     //       loop-invariant operands, this special case will no longer be
4129     //       required. We would add the scalarization decision to
4130     //       collectLoopScalars() and teach getVectorValue() to broadcast
4131     //       the lane-zero scalar value.
4132     auto *Clone = Builder.Insert(GEP->clone());
4133     for (unsigned Part = 0; Part < UF; ++Part) {
4134       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4135       VectorLoopValueMap.setVectorValue(GEP, Part, EntryPart);
4136       addMetadata(EntryPart, GEP);
4137     }
4138   } else {
4139     // If the GEP has at least one loop-varying operand, we are sure to
4140     // produce a vector of pointers. But if we are only unrolling, we want
4141     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4142     // produce with the code below will be scalar (if VF == 1) or vector
4143     // (otherwise). Note that for the unroll-only case, we still maintain
4144     // values in the vector mapping with initVector, as we do for other
4145     // instructions.
4146     for (unsigned Part = 0; Part < UF; ++Part) {
4147       // The pointer operand of the new GEP. If it's loop-invariant, we
4148       // won't broadcast it.
4149       auto *Ptr = IsPtrLoopInvariant ? State.get(Operands.getOperand(0), {0, 0})
4150                                      : State.get(Operands.getOperand(0), Part);
4151 
4152       // Collect all the indices for the new GEP. If any index is
4153       // loop-invariant, we won't broadcast it.
4154       SmallVector<Value *, 4> Indices;
4155       for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4156         VPValue *Operand = Operands.getOperand(I);
4157         if (IsIndexLoopInvariant[I - 1])
4158           Indices.push_back(State.get(Operand, {0, 0}));
4159         else
4160           Indices.push_back(State.get(Operand, Part));
4161       }
4162 
4163       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4164       // but it should be a vector, otherwise.
4165       auto *NewGEP =
4166           GEP->isInBounds()
4167               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4168                                           Indices)
4169               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4170       assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
4171              "NewGEP is not a pointer vector");
4172       VectorLoopValueMap.setVectorValue(GEP, Part, NewGEP);
4173       addMetadata(NewGEP, GEP);
4174     }
4175   }
4176 }
4177 
4178 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
4179                                               unsigned VF) {
4180   PHINode *P = cast<PHINode>(PN);
4181   if (EnableVPlanNativePath) {
4182     // Currently we enter here in the VPlan-native path for non-induction
4183     // PHIs where all control flow is uniform. We simply widen these PHIs.
4184     // Create a vector phi with no operands - the vector phi operands will be
4185     // set at the end of vector code generation.
4186     Type *VecTy =
4187         (VF == 1) ? PN->getType() : FixedVectorType::get(PN->getType(), VF);
4188     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4189     VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
4190     OrigPHIsToFix.push_back(P);
4191 
4192     return;
4193   }
4194 
4195   assert(PN->getParent() == OrigLoop->getHeader() &&
4196          "Non-header phis should have been handled elsewhere");
4197 
4198   // In order to support recurrences we need to be able to vectorize Phi nodes.
4199   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4200   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4201   // this value when we vectorize all of the instructions that use the PHI.
4202   if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4203     for (unsigned Part = 0; Part < UF; ++Part) {
4204       // This is phase one of vectorizing PHIs.
4205       Type *VecTy =
4206           (VF == 1) ? PN->getType() : FixedVectorType::get(PN->getType(), VF);
4207       Value *EntryPart = PHINode::Create(
4208           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4209       VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
4210     }
4211     return;
4212   }
4213 
4214   setDebugLocFromInst(Builder, P);
4215 
4216   // This PHINode must be an induction variable.
4217   // Make sure that we know about it.
4218   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4219 
4220   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4221   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4222 
4223   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4224   // which can be found from the original scalar operations.
4225   switch (II.getKind()) {
4226   case InductionDescriptor::IK_NoInduction:
4227     llvm_unreachable("Unknown induction");
4228   case InductionDescriptor::IK_IntInduction:
4229   case InductionDescriptor::IK_FpInduction:
4230     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4231   case InductionDescriptor::IK_PtrInduction: {
4232     // Handle the pointer induction variable case.
4233     assert(P->getType()->isPointerTy() && "Unexpected type.");
4234 
4235     if (Cost->isScalarAfterVectorization(P, VF)) {
4236       // This is the normalized GEP that starts counting at zero.
4237       Value *PtrInd =
4238           Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
4239       // Determine the number of scalars we need to generate for each unroll
4240       // iteration. If the instruction is uniform, we only need to generate the
4241       // first lane. Otherwise, we generate all VF values.
4242       unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
4243       for (unsigned Part = 0; Part < UF; ++Part) {
4244         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4245           Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
4246           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4247           Value *SclrGep =
4248               emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4249           SclrGep->setName("next.gep");
4250           VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
4251         }
4252       }
4253       return;
4254     }
4255     assert(isa<SCEVConstant>(II.getStep()) &&
4256            "Induction step not a SCEV constant!");
4257     Type *PhiType = II.getStep()->getType();
4258 
4259     // Build a pointer phi
4260     Value *ScalarStartValue = II.getStartValue();
4261     Type *ScStValueType = ScalarStartValue->getType();
4262     PHINode *NewPointerPhi =
4263         PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
4264     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4265 
4266     // A pointer induction, performed by using a gep
4267     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4268     Instruction *InductionLoc = LoopLatch->getTerminator();
4269     const SCEV *ScalarStep = II.getStep();
4270     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4271     Value *ScalarStepValue =
4272         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4273     Value *InductionGEP = GetElementPtrInst::Create(
4274         ScStValueType->getPointerElementType(), NewPointerPhi,
4275         Builder.CreateMul(ScalarStepValue, ConstantInt::get(PhiType, VF * UF)),
4276         "ptr.ind", InductionLoc);
4277     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4278 
4279     // Create UF many actual address geps that use the pointer
4280     // phi as base and a vectorized version of the step value
4281     // (<step*0, ..., step*N>) as offset.
4282     for (unsigned Part = 0; Part < UF; ++Part) {
4283       SmallVector<Constant *, 8> Indices;
4284       // Create a vector of consecutive numbers from zero to VF.
4285       for (unsigned i = 0; i < VF; ++i)
4286         Indices.push_back(ConstantInt::get(PhiType, i + Part * VF));
4287       Constant *StartOffset = ConstantVector::get(Indices);
4288 
4289       Value *GEP = Builder.CreateGEP(
4290           ScStValueType->getPointerElementType(), NewPointerPhi,
4291           Builder.CreateMul(StartOffset,
4292                             Builder.CreateVectorSplat(VF, ScalarStepValue),
4293                             "vector.gep"));
4294       VectorLoopValueMap.setVectorValue(P, Part, GEP);
4295     }
4296   }
4297   }
4298 }
4299 
4300 /// A helper function for checking whether an integer division-related
4301 /// instruction may divide by zero (in which case it must be predicated if
4302 /// executed conditionally in the scalar code).
4303 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4304 /// Non-zero divisors that are non compile-time constants will not be
4305 /// converted into multiplication, so we will still end up scalarizing
4306 /// the division, but can do so w/o predication.
4307 static bool mayDivideByZero(Instruction &I) {
4308   assert((I.getOpcode() == Instruction::UDiv ||
4309           I.getOpcode() == Instruction::SDiv ||
4310           I.getOpcode() == Instruction::URem ||
4311           I.getOpcode() == Instruction::SRem) &&
4312          "Unexpected instruction");
4313   Value *Divisor = I.getOperand(1);
4314   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4315   return !CInt || CInt->isZero();
4316 }
4317 
4318 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPUser &User,
4319                                            VPTransformState &State) {
4320   switch (I.getOpcode()) {
4321   case Instruction::Call:
4322   case Instruction::Br:
4323   case Instruction::PHI:
4324   case Instruction::GetElementPtr:
4325   case Instruction::Select:
4326     llvm_unreachable("This instruction is handled by a different recipe.");
4327   case Instruction::UDiv:
4328   case Instruction::SDiv:
4329   case Instruction::SRem:
4330   case Instruction::URem:
4331   case Instruction::Add:
4332   case Instruction::FAdd:
4333   case Instruction::Sub:
4334   case Instruction::FSub:
4335   case Instruction::FNeg:
4336   case Instruction::Mul:
4337   case Instruction::FMul:
4338   case Instruction::FDiv:
4339   case Instruction::FRem:
4340   case Instruction::Shl:
4341   case Instruction::LShr:
4342   case Instruction::AShr:
4343   case Instruction::And:
4344   case Instruction::Or:
4345   case Instruction::Xor: {
4346     // Just widen unops and binops.
4347     setDebugLocFromInst(Builder, &I);
4348 
4349     for (unsigned Part = 0; Part < UF; ++Part) {
4350       SmallVector<Value *, 2> Ops;
4351       for (VPValue *VPOp : User.operands())
4352         Ops.push_back(State.get(VPOp, Part));
4353 
4354       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4355 
4356       if (auto *VecOp = dyn_cast<Instruction>(V))
4357         VecOp->copyIRFlags(&I);
4358 
4359       // Use this vector value for all users of the original instruction.
4360       VectorLoopValueMap.setVectorValue(&I, Part, V);
4361       addMetadata(V, &I);
4362     }
4363 
4364     break;
4365   }
4366   case Instruction::ICmp:
4367   case Instruction::FCmp: {
4368     // Widen compares. Generate vector compares.
4369     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4370     auto *Cmp = cast<CmpInst>(&I);
4371     setDebugLocFromInst(Builder, Cmp);
4372     for (unsigned Part = 0; Part < UF; ++Part) {
4373       Value *A = State.get(User.getOperand(0), Part);
4374       Value *B = State.get(User.getOperand(1), Part);
4375       Value *C = nullptr;
4376       if (FCmp) {
4377         // Propagate fast math flags.
4378         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4379         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4380         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4381       } else {
4382         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4383       }
4384       VectorLoopValueMap.setVectorValue(&I, Part, C);
4385       addMetadata(C, &I);
4386     }
4387 
4388     break;
4389   }
4390 
4391   case Instruction::ZExt:
4392   case Instruction::SExt:
4393   case Instruction::FPToUI:
4394   case Instruction::FPToSI:
4395   case Instruction::FPExt:
4396   case Instruction::PtrToInt:
4397   case Instruction::IntToPtr:
4398   case Instruction::SIToFP:
4399   case Instruction::UIToFP:
4400   case Instruction::Trunc:
4401   case Instruction::FPTrunc:
4402   case Instruction::BitCast: {
4403     auto *CI = cast<CastInst>(&I);
4404     setDebugLocFromInst(Builder, CI);
4405 
4406     /// Vectorize casts.
4407     Type *DestTy =
4408         (VF == 1) ? CI->getType() : FixedVectorType::get(CI->getType(), VF);
4409 
4410     for (unsigned Part = 0; Part < UF; ++Part) {
4411       Value *A = State.get(User.getOperand(0), Part);
4412       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4413       VectorLoopValueMap.setVectorValue(&I, Part, Cast);
4414       addMetadata(Cast, &I);
4415     }
4416     break;
4417   }
4418   default:
4419     // This instruction is not vectorized by simple widening.
4420     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4421     llvm_unreachable("Unhandled instruction!");
4422   } // end of switch.
4423 }
4424 
4425 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPUser &ArgOperands,
4426                                                VPTransformState &State) {
4427   assert(!isa<DbgInfoIntrinsic>(I) &&
4428          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4429   setDebugLocFromInst(Builder, &I);
4430 
4431   Module *M = I.getParent()->getParent()->getParent();
4432   auto *CI = cast<CallInst>(&I);
4433 
4434   SmallVector<Type *, 4> Tys;
4435   for (Value *ArgOperand : CI->arg_operands())
4436     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
4437 
4438   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4439 
4440   // The flag shows whether we use Intrinsic or a usual Call for vectorized
4441   // version of the instruction.
4442   // Is it beneficial to perform intrinsic call compared to lib call?
4443   bool NeedToScalarize = false;
4444   unsigned CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4445   bool UseVectorIntrinsic =
4446       ID && Cost->getVectorIntrinsicCost(CI, VF) <= CallCost;
4447   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4448          "Instruction should be scalarized elsewhere.");
4449 
4450   for (unsigned Part = 0; Part < UF; ++Part) {
4451     SmallVector<Value *, 4> Args;
4452     for (auto &I : enumerate(ArgOperands.operands())) {
4453       // Some intrinsics have a scalar argument - don't replace it with a
4454       // vector.
4455       Value *Arg;
4456       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4457         Arg = State.get(I.value(), Part);
4458       else
4459         Arg = State.get(I.value(), {0, 0});
4460       Args.push_back(Arg);
4461     }
4462 
4463     Function *VectorF;
4464     if (UseVectorIntrinsic) {
4465       // Use vector version of the intrinsic.
4466       Type *TysForDecl[] = {CI->getType()};
4467       if (VF > 1)
4468         TysForDecl[0] =
4469             FixedVectorType::get(CI->getType()->getScalarType(), VF);
4470       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4471       assert(VectorF && "Can't retrieve vector intrinsic.");
4472     } else {
4473       // Use vector version of the function call.
4474       const VFShape Shape =
4475           VFShape::get(*CI, {VF, false} /*EC*/, false /*HasGlobalPred*/);
4476 #ifndef NDEBUG
4477       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4478              "Can't create vector function.");
4479 #endif
4480         VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
4481     }
4482       SmallVector<OperandBundleDef, 1> OpBundles;
4483       CI->getOperandBundlesAsDefs(OpBundles);
4484       CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4485 
4486       if (isa<FPMathOperator>(V))
4487         V->copyFastMathFlags(CI);
4488 
4489       VectorLoopValueMap.setVectorValue(&I, Part, V);
4490       addMetadata(V, &I);
4491   }
4492 }
4493 
4494 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I,
4495                                                  VPUser &Operands,
4496                                                  bool InvariantCond,
4497                                                  VPTransformState &State) {
4498   setDebugLocFromInst(Builder, &I);
4499 
4500   // The condition can be loop invariant  but still defined inside the
4501   // loop. This means that we can't just use the original 'cond' value.
4502   // We have to take the 'vectorized' value and pick the first lane.
4503   // Instcombine will make this a no-op.
4504   auto *InvarCond =
4505       InvariantCond ? State.get(Operands.getOperand(0), {0, 0}) : nullptr;
4506 
4507   for (unsigned Part = 0; Part < UF; ++Part) {
4508     Value *Cond =
4509         InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
4510     Value *Op0 = State.get(Operands.getOperand(1), Part);
4511     Value *Op1 = State.get(Operands.getOperand(2), Part);
4512     Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
4513     VectorLoopValueMap.setVectorValue(&I, Part, Sel);
4514     addMetadata(Sel, &I);
4515   }
4516 }
4517 
4518 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
4519   // We should not collect Scalars more than once per VF. Right now, this
4520   // function is called from collectUniformsAndScalars(), which already does
4521   // this check. Collecting Scalars for VF=1 does not make any sense.
4522   assert(VF >= 2 && Scalars.find(VF) == Scalars.end() &&
4523          "This function should not be visited twice for the same VF");
4524 
4525   SmallSetVector<Instruction *, 8> Worklist;
4526 
4527   // These sets are used to seed the analysis with pointers used by memory
4528   // accesses that will remain scalar.
4529   SmallSetVector<Instruction *, 8> ScalarPtrs;
4530   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4531   auto *Latch = TheLoop->getLoopLatch();
4532 
4533   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4534   // The pointer operands of loads and stores will be scalar as long as the
4535   // memory access is not a gather or scatter operation. The value operand of a
4536   // store will remain scalar if the store is scalarized.
4537   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4538     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4539     assert(WideningDecision != CM_Unknown &&
4540            "Widening decision should be ready at this moment");
4541     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4542       if (Ptr == Store->getValueOperand())
4543         return WideningDecision == CM_Scalarize;
4544     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4545            "Ptr is neither a value or pointer operand");
4546     return WideningDecision != CM_GatherScatter;
4547   };
4548 
4549   // A helper that returns true if the given value is a bitcast or
4550   // getelementptr instruction contained in the loop.
4551   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4552     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4553             isa<GetElementPtrInst>(V)) &&
4554            !TheLoop->isLoopInvariant(V);
4555   };
4556 
4557   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
4558     if (!isa<PHINode>(Ptr) ||
4559         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
4560       return false;
4561     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
4562     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
4563       return false;
4564     return isScalarUse(MemAccess, Ptr);
4565   };
4566 
4567   // A helper that evaluates a memory access's use of a pointer. If the
4568   // pointer is actually the pointer induction of a loop, it is being
4569   // inserted into Worklist. If the use will be a scalar use, and the
4570   // pointer is only used by memory accesses, we place the pointer in
4571   // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs.
4572   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4573     if (isScalarPtrInduction(MemAccess, Ptr)) {
4574       Worklist.insert(cast<Instruction>(Ptr));
4575       Instruction *Update = cast<Instruction>(
4576           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
4577       Worklist.insert(Update);
4578       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
4579                         << "\n");
4580       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
4581                         << "\n");
4582       return;
4583     }
4584     // We only care about bitcast and getelementptr instructions contained in
4585     // the loop.
4586     if (!isLoopVaryingBitCastOrGEP(Ptr))
4587       return;
4588 
4589     // If the pointer has already been identified as scalar (e.g., if it was
4590     // also identified as uniform), there's nothing to do.
4591     auto *I = cast<Instruction>(Ptr);
4592     if (Worklist.count(I))
4593       return;
4594 
4595     // If the use of the pointer will be a scalar use, and all users of the
4596     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4597     // place the pointer in PossibleNonScalarPtrs.
4598     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4599           return isa<LoadInst>(U) || isa<StoreInst>(U);
4600         }))
4601       ScalarPtrs.insert(I);
4602     else
4603       PossibleNonScalarPtrs.insert(I);
4604   };
4605 
4606   // We seed the scalars analysis with three classes of instructions: (1)
4607   // instructions marked uniform-after-vectorization and (2) bitcast,
4608   // getelementptr and (pointer) phi instructions used by memory accesses
4609   // requiring a scalar use.
4610   //
4611   // (1) Add to the worklist all instructions that have been identified as
4612   // uniform-after-vectorization.
4613   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4614 
4615   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4616   // memory accesses requiring a scalar use. The pointer operands of loads and
4617   // stores will be scalar as long as the memory accesses is not a gather or
4618   // scatter operation. The value operand of a store will remain scalar if the
4619   // store is scalarized.
4620   for (auto *BB : TheLoop->blocks())
4621     for (auto &I : *BB) {
4622       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4623         evaluatePtrUse(Load, Load->getPointerOperand());
4624       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4625         evaluatePtrUse(Store, Store->getPointerOperand());
4626         evaluatePtrUse(Store, Store->getValueOperand());
4627       }
4628     }
4629   for (auto *I : ScalarPtrs)
4630     if (!PossibleNonScalarPtrs.count(I)) {
4631       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4632       Worklist.insert(I);
4633     }
4634 
4635   // Insert the forced scalars.
4636   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4637   // induction variable when the PHI user is scalarized.
4638   auto ForcedScalar = ForcedScalars.find(VF);
4639   if (ForcedScalar != ForcedScalars.end())
4640     for (auto *I : ForcedScalar->second)
4641       Worklist.insert(I);
4642 
4643   // Expand the worklist by looking through any bitcasts and getelementptr
4644   // instructions we've already identified as scalar. This is similar to the
4645   // expansion step in collectLoopUniforms(); however, here we're only
4646   // expanding to include additional bitcasts and getelementptr instructions.
4647   unsigned Idx = 0;
4648   while (Idx != Worklist.size()) {
4649     Instruction *Dst = Worklist[Idx++];
4650     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4651       continue;
4652     auto *Src = cast<Instruction>(Dst->getOperand(0));
4653     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4654           auto *J = cast<Instruction>(U);
4655           return !TheLoop->contains(J) || Worklist.count(J) ||
4656                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4657                   isScalarUse(J, Src));
4658         })) {
4659       Worklist.insert(Src);
4660       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4661     }
4662   }
4663 
4664   // An induction variable will remain scalar if all users of the induction
4665   // variable and induction variable update remain scalar.
4666   for (auto &Induction : Legal->getInductionVars()) {
4667     auto *Ind = Induction.first;
4668     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4669 
4670     // If tail-folding is applied, the primary induction variable will be used
4671     // to feed a vector compare.
4672     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4673       continue;
4674 
4675     // Determine if all users of the induction variable are scalar after
4676     // vectorization.
4677     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4678       auto *I = cast<Instruction>(U);
4679       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
4680     });
4681     if (!ScalarInd)
4682       continue;
4683 
4684     // Determine if all users of the induction variable update instruction are
4685     // scalar after vectorization.
4686     auto ScalarIndUpdate =
4687         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4688           auto *I = cast<Instruction>(U);
4689           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
4690         });
4691     if (!ScalarIndUpdate)
4692       continue;
4693 
4694     // The induction variable and its update instruction will remain scalar.
4695     Worklist.insert(Ind);
4696     Worklist.insert(IndUpdate);
4697     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4698     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4699                       << "\n");
4700   }
4701 
4702   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4703 }
4704 
4705 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, unsigned VF) {
4706   if (!blockNeedsPredication(I->getParent()))
4707     return false;
4708   switch(I->getOpcode()) {
4709   default:
4710     break;
4711   case Instruction::Load:
4712   case Instruction::Store: {
4713     if (!Legal->isMaskRequired(I))
4714       return false;
4715     auto *Ptr = getLoadStorePointerOperand(I);
4716     auto *Ty = getMemInstValueType(I);
4717     // We have already decided how to vectorize this instruction, get that
4718     // result.
4719     if (VF > 1) {
4720       InstWidening WideningDecision = getWideningDecision(I, VF);
4721       assert(WideningDecision != CM_Unknown &&
4722              "Widening decision should be ready at this moment");
4723       return WideningDecision == CM_Scalarize;
4724     }
4725     const Align Alignment = getLoadStoreAlignment(I);
4726     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4727                                 isLegalMaskedGather(Ty, Alignment))
4728                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4729                                 isLegalMaskedScatter(Ty, Alignment));
4730   }
4731   case Instruction::UDiv:
4732   case Instruction::SDiv:
4733   case Instruction::SRem:
4734   case Instruction::URem:
4735     return mayDivideByZero(*I);
4736   }
4737   return false;
4738 }
4739 
4740 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I,
4741                                                                unsigned VF) {
4742   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4743   assert(getWideningDecision(I, VF) == CM_Unknown &&
4744          "Decision should not be set yet.");
4745   auto *Group = getInterleavedAccessGroup(I);
4746   assert(Group && "Must have a group.");
4747 
4748   // If the instruction's allocated size doesn't equal it's type size, it
4749   // requires padding and will be scalarized.
4750   auto &DL = I->getModule()->getDataLayout();
4751   auto *ScalarTy = getMemInstValueType(I);
4752   if (hasIrregularType(ScalarTy, DL, VF))
4753     return false;
4754 
4755   // Check if masking is required.
4756   // A Group may need masking for one of two reasons: it resides in a block that
4757   // needs predication, or it was decided to use masking to deal with gaps.
4758   bool PredicatedAccessRequiresMasking =
4759       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
4760   bool AccessWithGapsRequiresMasking =
4761       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
4762   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
4763     return true;
4764 
4765   // If masked interleaving is required, we expect that the user/target had
4766   // enabled it, because otherwise it either wouldn't have been created or
4767   // it should have been invalidated by the CostModel.
4768   assert(useMaskedInterleavedAccesses(TTI) &&
4769          "Masked interleave-groups for predicated accesses are not enabled.");
4770 
4771   auto *Ty = getMemInstValueType(I);
4772   const Align Alignment = getLoadStoreAlignment(I);
4773   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4774                           : TTI.isLegalMaskedStore(Ty, Alignment);
4775 }
4776 
4777 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I,
4778                                                                unsigned VF) {
4779   // Get and ensure we have a valid memory instruction.
4780   LoadInst *LI = dyn_cast<LoadInst>(I);
4781   StoreInst *SI = dyn_cast<StoreInst>(I);
4782   assert((LI || SI) && "Invalid memory instruction");
4783 
4784   auto *Ptr = getLoadStorePointerOperand(I);
4785 
4786   // In order to be widened, the pointer should be consecutive, first of all.
4787   if (!Legal->isConsecutivePtr(Ptr))
4788     return false;
4789 
4790   // If the instruction is a store located in a predicated block, it will be
4791   // scalarized.
4792   if (isScalarWithPredication(I))
4793     return false;
4794 
4795   // If the instruction's allocated size doesn't equal it's type size, it
4796   // requires padding and will be scalarized.
4797   auto &DL = I->getModule()->getDataLayout();
4798   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
4799   if (hasIrregularType(ScalarTy, DL, VF))
4800     return false;
4801 
4802   return true;
4803 }
4804 
4805 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
4806   // We should not collect Uniforms more than once per VF. Right now,
4807   // this function is called from collectUniformsAndScalars(), which
4808   // already does this check. Collecting Uniforms for VF=1 does not make any
4809   // sense.
4810 
4811   assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() &&
4812          "This function should not be visited twice for the same VF");
4813 
4814   // Visit the list of Uniforms. If we'll not find any uniform value, we'll
4815   // not analyze again.  Uniforms.count(VF) will return 1.
4816   Uniforms[VF].clear();
4817 
4818   // We now know that the loop is vectorizable!
4819   // Collect instructions inside the loop that will remain uniform after
4820   // vectorization.
4821 
4822   // Global values, params and instructions outside of current loop are out of
4823   // scope.
4824   auto isOutOfScope = [&](Value *V) -> bool {
4825     Instruction *I = dyn_cast<Instruction>(V);
4826     return (!I || !TheLoop->contains(I));
4827   };
4828 
4829   SetVector<Instruction *> Worklist;
4830   BasicBlock *Latch = TheLoop->getLoopLatch();
4831 
4832   // Instructions that are scalar with predication must not be considered
4833   // uniform after vectorization, because that would create an erroneous
4834   // replicating region where only a single instance out of VF should be formed.
4835   // TODO: optimize such seldom cases if found important, see PR40816.
4836   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
4837     if (isScalarWithPredication(I, VF)) {
4838       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
4839                         << *I << "\n");
4840       return;
4841     }
4842     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
4843     Worklist.insert(I);
4844   };
4845 
4846   // Start with the conditional branch. If the branch condition is an
4847   // instruction contained in the loop that is only used by the branch, it is
4848   // uniform.
4849   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4850   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
4851     addToWorklistIfAllowed(Cmp);
4852 
4853   // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
4854   // are pointers that are treated like consecutive pointers during
4855   // vectorization. The pointer operands of interleaved accesses are an
4856   // example.
4857   SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
4858 
4859   // Holds pointer operands of instructions that are possibly non-uniform.
4860   SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
4861 
4862   auto isUniformDecision = [&](Instruction *I, unsigned VF) {
4863     InstWidening WideningDecision = getWideningDecision(I, VF);
4864     assert(WideningDecision != CM_Unknown &&
4865            "Widening decision should be ready at this moment");
4866 
4867     return (WideningDecision == CM_Widen ||
4868             WideningDecision == CM_Widen_Reverse ||
4869             WideningDecision == CM_Interleave);
4870   };
4871   // Iterate over the instructions in the loop, and collect all
4872   // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
4873   // that a consecutive-like pointer operand will be scalarized, we collect it
4874   // in PossibleNonUniformPtrs instead. We use two sets here because a single
4875   // getelementptr instruction can be used by both vectorized and scalarized
4876   // memory instructions. For example, if a loop loads and stores from the same
4877   // location, but the store is conditional, the store will be scalarized, and
4878   // the getelementptr won't remain uniform.
4879   for (auto *BB : TheLoop->blocks())
4880     for (auto &I : *BB) {
4881       // If there's no pointer operand, there's nothing to do.
4882       auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
4883       if (!Ptr)
4884         continue;
4885 
4886       // True if all users of Ptr are memory accesses that have Ptr as their
4887       // pointer operand.
4888       auto UsersAreMemAccesses =
4889           llvm::all_of(Ptr->users(), [&](User *U) -> bool {
4890             return getLoadStorePointerOperand(U) == Ptr;
4891           });
4892 
4893       // Ensure the memory instruction will not be scalarized or used by
4894       // gather/scatter, making its pointer operand non-uniform. If the pointer
4895       // operand is used by any instruction other than a memory access, we
4896       // conservatively assume the pointer operand may be non-uniform.
4897       if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
4898         PossibleNonUniformPtrs.insert(Ptr);
4899 
4900       // If the memory instruction will be vectorized and its pointer operand
4901       // is consecutive-like, or interleaving - the pointer operand should
4902       // remain uniform.
4903       else
4904         ConsecutiveLikePtrs.insert(Ptr);
4905     }
4906 
4907   // Add to the Worklist all consecutive and consecutive-like pointers that
4908   // aren't also identified as possibly non-uniform.
4909   for (auto *V : ConsecutiveLikePtrs)
4910     if (!PossibleNonUniformPtrs.count(V))
4911       addToWorklistIfAllowed(V);
4912 
4913   // Expand Worklist in topological order: whenever a new instruction
4914   // is added , its users should be already inside Worklist.  It ensures
4915   // a uniform instruction will only be used by uniform instructions.
4916   unsigned idx = 0;
4917   while (idx != Worklist.size()) {
4918     Instruction *I = Worklist[idx++];
4919 
4920     for (auto OV : I->operand_values()) {
4921       // isOutOfScope operands cannot be uniform instructions.
4922       if (isOutOfScope(OV))
4923         continue;
4924       // First order recurrence Phi's should typically be considered
4925       // non-uniform.
4926       auto *OP = dyn_cast<PHINode>(OV);
4927       if (OP && Legal->isFirstOrderRecurrence(OP))
4928         continue;
4929       // If all the users of the operand are uniform, then add the
4930       // operand into the uniform worklist.
4931       auto *OI = cast<Instruction>(OV);
4932       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
4933             auto *J = cast<Instruction>(U);
4934             return Worklist.count(J) ||
4935                    (OI == getLoadStorePointerOperand(J) &&
4936                     isUniformDecision(J, VF));
4937           }))
4938         addToWorklistIfAllowed(OI);
4939     }
4940   }
4941 
4942   // Returns true if Ptr is the pointer operand of a memory access instruction
4943   // I, and I is known to not require scalarization.
4944   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
4945     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
4946   };
4947 
4948   // For an instruction to be added into Worklist above, all its users inside
4949   // the loop should also be in Worklist. However, this condition cannot be
4950   // true for phi nodes that form a cyclic dependence. We must process phi
4951   // nodes separately. An induction variable will remain uniform if all users
4952   // of the induction variable and induction variable update remain uniform.
4953   // The code below handles both pointer and non-pointer induction variables.
4954   for (auto &Induction : Legal->getInductionVars()) {
4955     auto *Ind = Induction.first;
4956     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4957 
4958     // Determine if all users of the induction variable are uniform after
4959     // vectorization.
4960     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4961       auto *I = cast<Instruction>(U);
4962       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4963              isVectorizedMemAccessUse(I, Ind);
4964     });
4965     if (!UniformInd)
4966       continue;
4967 
4968     // Determine if all users of the induction variable update instruction are
4969     // uniform after vectorization.
4970     auto UniformIndUpdate =
4971         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4972           auto *I = cast<Instruction>(U);
4973           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4974                  isVectorizedMemAccessUse(I, IndUpdate);
4975         });
4976     if (!UniformIndUpdate)
4977       continue;
4978 
4979     // The induction variable and its update instruction will remain uniform.
4980     addToWorklistIfAllowed(Ind);
4981     addToWorklistIfAllowed(IndUpdate);
4982   }
4983 
4984   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
4985 }
4986 
4987 bool LoopVectorizationCostModel::runtimeChecksRequired() {
4988   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
4989 
4990   if (Legal->getRuntimePointerChecking()->Need) {
4991     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
4992         "runtime pointer checks needed. Enable vectorization of this "
4993         "loop with '#pragma clang loop vectorize(enable)' when "
4994         "compiling with -Os/-Oz",
4995         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4996     return true;
4997   }
4998 
4999   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5000     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5001         "runtime SCEV checks needed. Enable vectorization of this "
5002         "loop with '#pragma clang loop vectorize(enable)' when "
5003         "compiling with -Os/-Oz",
5004         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5005     return true;
5006   }
5007 
5008   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5009   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5010     reportVectorizationFailure("Runtime stride check for small trip count",
5011         "runtime stride == 1 checks needed. Enable vectorization of "
5012         "this loop without such check by compiling with -Os/-Oz",
5013         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5014     return true;
5015   }
5016 
5017   return false;
5018 }
5019 
5020 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(unsigned UserVF,
5021                                                             unsigned UserIC) {
5022   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
5023     // TODO: It may by useful to do since it's still likely to be dynamically
5024     // uniform if the target can skip.
5025     reportVectorizationFailure(
5026         "Not inserting runtime ptr check for divergent target",
5027         "runtime pointer checks needed. Not enabled for divergent target",
5028         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5029     return None;
5030   }
5031 
5032   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5033   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5034   if (TC == 1) {
5035     reportVectorizationFailure("Single iteration (non) loop",
5036         "loop trip count is one, irrelevant for vectorization",
5037         "SingleIterationLoop", ORE, TheLoop);
5038     return None;
5039   }
5040 
5041   switch (ScalarEpilogueStatus) {
5042   case CM_ScalarEpilogueAllowed:
5043     return UserVF ? UserVF : computeFeasibleMaxVF(TC);
5044   case CM_ScalarEpilogueNotNeededUsePredicate:
5045     LLVM_DEBUG(
5046         dbgs() << "LV: vector predicate hint/switch found.\n"
5047                << "LV: Not allowing scalar epilogue, creating predicated "
5048                << "vector loop.\n");
5049     break;
5050   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5051     // fallthrough as a special case of OptForSize
5052   case CM_ScalarEpilogueNotAllowedOptSize:
5053     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5054       LLVM_DEBUG(
5055           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5056     else
5057       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5058                         << "count.\n");
5059 
5060     // Bail if runtime checks are required, which are not good when optimising
5061     // for size.
5062     if (runtimeChecksRequired())
5063       return None;
5064     break;
5065   }
5066 
5067   // Now try the tail folding
5068 
5069   // Invalidate interleave groups that require an epilogue if we can't mask
5070   // the interleave-group.
5071   if (!useMaskedInterleavedAccesses(TTI)) {
5072     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5073            "No decisions should have been taken at this point");
5074     // Note: There is no need to invalidate any cost modeling decisions here, as
5075     // non where taken so far.
5076     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5077   }
5078 
5079   unsigned MaxVF = UserVF ? UserVF : computeFeasibleMaxVF(TC);
5080   assert((UserVF || isPowerOf2_32(MaxVF)) && "MaxVF must be a power of 2");
5081   unsigned MaxVFtimesIC = UserIC ? MaxVF * UserIC : MaxVF;
5082   if (TC > 0 && TC % MaxVFtimesIC == 0) {
5083     // Accept MaxVF if we do not have a tail.
5084     LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5085     return MaxVF;
5086   }
5087 
5088   // If we don't know the precise trip count, or if the trip count that we
5089   // found modulo the vectorization factor is not zero, try to fold the tail
5090   // by masking.
5091   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5092   if (Legal->prepareToFoldTailByMasking()) {
5093     FoldTailByMasking = true;
5094     return MaxVF;
5095   }
5096 
5097   if (TC == 0) {
5098     reportVectorizationFailure(
5099         "Unable to calculate the loop count due to complex control flow",
5100         "unable to calculate the loop count due to complex control flow",
5101         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5102     return None;
5103   }
5104 
5105   reportVectorizationFailure(
5106       "Cannot optimize for size and vectorize at the same time.",
5107       "cannot optimize for size and vectorize at the same time. "
5108       "Enable vectorization of this loop with '#pragma clang loop "
5109       "vectorize(enable)' when compiling with -Os/-Oz",
5110       "NoTailLoopWithOptForSize", ORE, TheLoop);
5111   return None;
5112 }
5113 
5114 unsigned
5115 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount) {
5116   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5117   unsigned SmallestType, WidestType;
5118   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5119   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
5120 
5121   // Get the maximum safe dependence distance in bits computed by LAA.
5122   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
5123   // the memory accesses that is most restrictive (involved in the smallest
5124   // dependence distance).
5125   unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();
5126 
5127   WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);
5128 
5129   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
5130   // Note that both WidestRegister and WidestType may not be a powers of 2.
5131   unsigned MaxVectorSize = PowerOf2Floor(WidestRegister / WidestType);
5132 
5133   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5134                     << " / " << WidestType << " bits.\n");
5135   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5136                     << WidestRegister << " bits.\n");
5137 
5138   assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements"
5139                                  " into one vector!");
5140   if (MaxVectorSize == 0) {
5141     LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
5142     MaxVectorSize = 1;
5143     return MaxVectorSize;
5144   } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
5145              isPowerOf2_32(ConstTripCount)) {
5146     // We need to clamp the VF to be the ConstTripCount. There is no point in
5147     // choosing a higher viable VF as done in the loop below.
5148     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5149                       << ConstTripCount << "\n");
5150     MaxVectorSize = ConstTripCount;
5151     return MaxVectorSize;
5152   }
5153 
5154   unsigned MaxVF = MaxVectorSize;
5155   if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
5156       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5157     // Collect all viable vectorization factors larger than the default MaxVF
5158     // (i.e. MaxVectorSize).
5159     SmallVector<unsigned, 8> VFs;
5160     unsigned NewMaxVectorSize = WidestRegister / SmallestType;
5161     for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
5162       VFs.push_back(VS);
5163 
5164     // For each VF calculate its register usage.
5165     auto RUs = calculateRegisterUsage(VFs);
5166 
5167     // Select the largest VF which doesn't require more registers than existing
5168     // ones.
5169     for (int i = RUs.size() - 1; i >= 0; --i) {
5170       bool Selected = true;
5171       for (auto& pair : RUs[i].MaxLocalUsers) {
5172         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5173         if (pair.second > TargetNumRegisters)
5174           Selected = false;
5175       }
5176       if (Selected) {
5177         MaxVF = VFs[i];
5178         break;
5179       }
5180     }
5181     if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
5182       if (MaxVF < MinVF) {
5183         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5184                           << ") with target's minimum: " << MinVF << '\n');
5185         MaxVF = MinVF;
5186       }
5187     }
5188   }
5189   return MaxVF;
5190 }
5191 
5192 VectorizationFactor
5193 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
5194   float Cost = expectedCost(1).first;
5195   const float ScalarCost = Cost;
5196   unsigned Width = 1;
5197   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
5198 
5199   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5200   if (ForceVectorization && MaxVF > 1) {
5201     // Ignore scalar width, because the user explicitly wants vectorization.
5202     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5203     // evaluation.
5204     Cost = std::numeric_limits<float>::max();
5205   }
5206 
5207   for (unsigned i = 2; i <= MaxVF; i *= 2) {
5208     // Notice that the vector loop needs to be executed less times, so
5209     // we need to divide the cost of the vector loops by the width of
5210     // the vector elements.
5211     VectorizationCostTy C = expectedCost(i);
5212     float VectorCost = C.first / (float)i;
5213     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5214                       << " costs: " << (int)VectorCost << ".\n");
5215     if (!C.second && !ForceVectorization) {
5216       LLVM_DEBUG(
5217           dbgs() << "LV: Not considering vector loop of width " << i
5218                  << " because it will not generate any vector instructions.\n");
5219       continue;
5220     }
5221     if (VectorCost < Cost) {
5222       Cost = VectorCost;
5223       Width = i;
5224     }
5225   }
5226 
5227   if (!EnableCondStoresVectorization && NumPredStores) {
5228     reportVectorizationFailure("There are conditional stores.",
5229         "store that is conditionally executed prevents vectorization",
5230         "ConditionalStore", ORE, TheLoop);
5231     Width = 1;
5232     Cost = ScalarCost;
5233   }
5234 
5235   LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
5236              << "LV: Vectorization seems to be not beneficial, "
5237              << "but was forced by a user.\n");
5238   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5239   VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
5240   return Factor;
5241 }
5242 
5243 std::pair<unsigned, unsigned>
5244 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5245   unsigned MinWidth = -1U;
5246   unsigned MaxWidth = 8;
5247   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5248 
5249   // For each block.
5250   for (BasicBlock *BB : TheLoop->blocks()) {
5251     // For each instruction in the loop.
5252     for (Instruction &I : BB->instructionsWithoutDebug()) {
5253       Type *T = I.getType();
5254 
5255       // Skip ignored values.
5256       if (ValuesToIgnore.count(&I))
5257         continue;
5258 
5259       // Only examine Loads, Stores and PHINodes.
5260       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5261         continue;
5262 
5263       // Examine PHI nodes that are reduction variables. Update the type to
5264       // account for the recurrence type.
5265       if (auto *PN = dyn_cast<PHINode>(&I)) {
5266         if (!Legal->isReductionVariable(PN))
5267           continue;
5268         RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
5269         T = RdxDesc.getRecurrenceType();
5270       }
5271 
5272       // Examine the stored values.
5273       if (auto *ST = dyn_cast<StoreInst>(&I))
5274         T = ST->getValueOperand()->getType();
5275 
5276       // Ignore loaded pointer types and stored pointer types that are not
5277       // vectorizable.
5278       //
5279       // FIXME: The check here attempts to predict whether a load or store will
5280       //        be vectorized. We only know this for certain after a VF has
5281       //        been selected. Here, we assume that if an access can be
5282       //        vectorized, it will be. We should also look at extending this
5283       //        optimization to non-pointer types.
5284       //
5285       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
5286           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
5287         continue;
5288 
5289       MinWidth = std::min(MinWidth,
5290                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5291       MaxWidth = std::max(MaxWidth,
5292                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5293     }
5294   }
5295 
5296   return {MinWidth, MaxWidth};
5297 }
5298 
5299 unsigned LoopVectorizationCostModel::selectInterleaveCount(unsigned VF,
5300                                                            unsigned LoopCost) {
5301   // -- The interleave heuristics --
5302   // We interleave the loop in order to expose ILP and reduce the loop overhead.
5303   // There are many micro-architectural considerations that we can't predict
5304   // at this level. For example, frontend pressure (on decode or fetch) due to
5305   // code size, or the number and capabilities of the execution ports.
5306   //
5307   // We use the following heuristics to select the interleave count:
5308   // 1. If the code has reductions, then we interleave to break the cross
5309   // iteration dependency.
5310   // 2. If the loop is really small, then we interleave to reduce the loop
5311   // overhead.
5312   // 3. We don't interleave if we think that we will spill registers to memory
5313   // due to the increased register pressure.
5314 
5315   if (!isScalarEpilogueAllowed())
5316     return 1;
5317 
5318   // We used the distance for the interleave count.
5319   if (Legal->getMaxSafeDepDistBytes() != -1U)
5320     return 1;
5321 
5322   // Do not interleave loops with a relatively small known or estimated trip
5323   // count.
5324   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
5325   if (BestKnownTC && *BestKnownTC < TinyTripCountInterleaveThreshold)
5326     return 1;
5327 
5328   RegisterUsage R = calculateRegisterUsage({VF})[0];
5329   // We divide by these constants so assume that we have at least one
5330   // instruction that uses at least one register.
5331   for (auto& pair : R.MaxLocalUsers) {
5332     pair.second = std::max(pair.second, 1U);
5333   }
5334 
5335   // We calculate the interleave count using the following formula.
5336   // Subtract the number of loop invariants from the number of available
5337   // registers. These registers are used by all of the interleaved instances.
5338   // Next, divide the remaining registers by the number of registers that is
5339   // required by the loop, in order to estimate how many parallel instances
5340   // fit without causing spills. All of this is rounded down if necessary to be
5341   // a power of two. We want power of two interleave count to simplify any
5342   // addressing operations or alignment considerations.
5343   // We also want power of two interleave counts to ensure that the induction
5344   // variable of the vector loop wraps to zero, when tail is folded by masking;
5345   // this currently happens when OptForSize, in which case IC is set to 1 above.
5346   unsigned IC = UINT_MAX;
5347 
5348   for (auto& pair : R.MaxLocalUsers) {
5349     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5350     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
5351                       << " registers of "
5352                       << TTI.getRegisterClassName(pair.first) << " register class\n");
5353     if (VF == 1) {
5354       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5355         TargetNumRegisters = ForceTargetNumScalarRegs;
5356     } else {
5357       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5358         TargetNumRegisters = ForceTargetNumVectorRegs;
5359     }
5360     unsigned MaxLocalUsers = pair.second;
5361     unsigned LoopInvariantRegs = 0;
5362     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
5363       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
5364 
5365     unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
5366     // Don't count the induction variable as interleaved.
5367     if (EnableIndVarRegisterHeur) {
5368       TmpIC =
5369           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
5370                         std::max(1U, (MaxLocalUsers - 1)));
5371     }
5372 
5373     IC = std::min(IC, TmpIC);
5374   }
5375 
5376   // Clamp the interleave ranges to reasonable counts.
5377   unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
5378 
5379   // Check if the user has overridden the max.
5380   if (VF == 1) {
5381     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5382       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5383   } else {
5384     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5385       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5386   }
5387 
5388   // If trip count is known or estimated compile time constant, limit the
5389   // interleave count to be less than the trip count divided by VF.
5390   if (BestKnownTC) {
5391     MaxInterleaveCount = std::min(*BestKnownTC / VF, MaxInterleaveCount);
5392   }
5393 
5394   // If we did not calculate the cost for VF (because the user selected the VF)
5395   // then we calculate the cost of VF here.
5396   if (LoopCost == 0)
5397     LoopCost = expectedCost(VF).first;
5398 
5399   assert(LoopCost && "Non-zero loop cost expected");
5400 
5401   // Clamp the calculated IC to be between the 1 and the max interleave count
5402   // that the target and trip count allows.
5403   if (IC > MaxInterleaveCount)
5404     IC = MaxInterleaveCount;
5405   else if (IC < 1)
5406     IC = 1;
5407 
5408   // Interleave if we vectorized this loop and there is a reduction that could
5409   // benefit from interleaving.
5410   if (VF > 1 && !Legal->getReductionVars().empty()) {
5411     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5412     return IC;
5413   }
5414 
5415   // Note that if we've already vectorized the loop we will have done the
5416   // runtime check and so interleaving won't require further checks.
5417   bool InterleavingRequiresRuntimePointerCheck =
5418       (VF == 1 && Legal->getRuntimePointerChecking()->Need);
5419 
5420   // We want to interleave small loops in order to reduce the loop overhead and
5421   // potentially expose ILP opportunities.
5422   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
5423   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
5424     // We assume that the cost overhead is 1 and we use the cost model
5425     // to estimate the cost of the loop and interleave until the cost of the
5426     // loop overhead is about 5% of the cost of the loop.
5427     unsigned SmallIC =
5428         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
5429 
5430     // Interleave until store/load ports (estimated by max interleave count) are
5431     // saturated.
5432     unsigned NumStores = Legal->getNumStores();
5433     unsigned NumLoads = Legal->getNumLoads();
5434     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5435     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5436 
5437     // If we have a scalar reduction (vector reductions are already dealt with
5438     // by this point), we can increase the critical path length if the loop
5439     // we're interleaving is inside another loop. Limit, by default to 2, so the
5440     // critical path only gets increased by one reduction operation.
5441     if (!Legal->getReductionVars().empty() && TheLoop->getLoopDepth() > 1) {
5442       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5443       SmallIC = std::min(SmallIC, F);
5444       StoresIC = std::min(StoresIC, F);
5445       LoadsIC = std::min(LoadsIC, F);
5446     }
5447 
5448     if (EnableLoadStoreRuntimeInterleave &&
5449         std::max(StoresIC, LoadsIC) > SmallIC) {
5450       LLVM_DEBUG(
5451           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5452       return std::max(StoresIC, LoadsIC);
5453     }
5454 
5455     LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5456     return SmallIC;
5457   }
5458 
5459   // Interleave if this is a large loop (small loops are already dealt with by
5460   // this point) that could benefit from interleaving.
5461   bool HasReductions = !Legal->getReductionVars().empty();
5462   if (TTI.enableAggressiveInterleaving(HasReductions)) {
5463     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5464     return IC;
5465   }
5466 
5467   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
5468   return 1;
5469 }
5470 
5471 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5472 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
5473   // This function calculates the register usage by measuring the highest number
5474   // of values that are alive at a single location. Obviously, this is a very
5475   // rough estimation. We scan the loop in a topological order in order and
5476   // assign a number to each instruction. We use RPO to ensure that defs are
5477   // met before their users. We assume that each instruction that has in-loop
5478   // users starts an interval. We record every time that an in-loop value is
5479   // used, so we have a list of the first and last occurrences of each
5480   // instruction. Next, we transpose this data structure into a multi map that
5481   // holds the list of intervals that *end* at a specific location. This multi
5482   // map allows us to perform a linear search. We scan the instructions linearly
5483   // and record each time that a new interval starts, by placing it in a set.
5484   // If we find this value in the multi-map then we remove it from the set.
5485   // The max register usage is the maximum size of the set.
5486   // We also search for instructions that are defined outside the loop, but are
5487   // used inside the loop. We need this number separately from the max-interval
5488   // usage number because when we unroll, loop-invariant values do not take
5489   // more register.
5490   LoopBlocksDFS DFS(TheLoop);
5491   DFS.perform(LI);
5492 
5493   RegisterUsage RU;
5494 
5495   // Each 'key' in the map opens a new interval. The values
5496   // of the map are the index of the 'last seen' usage of the
5497   // instruction that is the key.
5498   using IntervalMap = DenseMap<Instruction *, unsigned>;
5499 
5500   // Maps instruction to its index.
5501   SmallVector<Instruction *, 64> IdxToInstr;
5502   // Marks the end of each interval.
5503   IntervalMap EndPoint;
5504   // Saves the list of instruction indices that are used in the loop.
5505   SmallPtrSet<Instruction *, 8> Ends;
5506   // Saves the list of values that are used in the loop but are
5507   // defined outside the loop, such as arguments and constants.
5508   SmallPtrSet<Value *, 8> LoopInvariants;
5509 
5510   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
5511     for (Instruction &I : BB->instructionsWithoutDebug()) {
5512       IdxToInstr.push_back(&I);
5513 
5514       // Save the end location of each USE.
5515       for (Value *U : I.operands()) {
5516         auto *Instr = dyn_cast<Instruction>(U);
5517 
5518         // Ignore non-instruction values such as arguments, constants, etc.
5519         if (!Instr)
5520           continue;
5521 
5522         // If this instruction is outside the loop then record it and continue.
5523         if (!TheLoop->contains(Instr)) {
5524           LoopInvariants.insert(Instr);
5525           continue;
5526         }
5527 
5528         // Overwrite previous end points.
5529         EndPoint[Instr] = IdxToInstr.size();
5530         Ends.insert(Instr);
5531       }
5532     }
5533   }
5534 
5535   // Saves the list of intervals that end with the index in 'key'.
5536   using InstrList = SmallVector<Instruction *, 2>;
5537   DenseMap<unsigned, InstrList> TransposeEnds;
5538 
5539   // Transpose the EndPoints to a list of values that end at each index.
5540   for (auto &Interval : EndPoint)
5541     TransposeEnds[Interval.second].push_back(Interval.first);
5542 
5543   SmallPtrSet<Instruction *, 8> OpenIntervals;
5544 
5545   // Get the size of the widest register.
5546   unsigned MaxSafeDepDist = -1U;
5547   if (Legal->getMaxSafeDepDistBytes() != -1U)
5548     MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
5549   unsigned WidestRegister =
5550       std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
5551   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5552 
5553   SmallVector<RegisterUsage, 8> RUs(VFs.size());
5554   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
5555 
5556   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
5557 
5558   // A lambda that gets the register usage for the given type and VF.
5559   auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
5560     if (Ty->isTokenTy())
5561       return 0U;
5562     unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
5563     return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
5564   };
5565 
5566   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
5567     Instruction *I = IdxToInstr[i];
5568 
5569     // Remove all of the instructions that end at this location.
5570     InstrList &List = TransposeEnds[i];
5571     for (Instruction *ToRemove : List)
5572       OpenIntervals.erase(ToRemove);
5573 
5574     // Ignore instructions that are never used within the loop.
5575     if (!Ends.count(I))
5576       continue;
5577 
5578     // Skip ignored values.
5579     if (ValuesToIgnore.count(I))
5580       continue;
5581 
5582     // For each VF find the maximum usage of registers.
5583     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
5584       // Count the number of live intervals.
5585       SmallMapVector<unsigned, unsigned, 4> RegUsage;
5586 
5587       if (VFs[j] == 1) {
5588         for (auto Inst : OpenIntervals) {
5589           unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
5590           if (RegUsage.find(ClassID) == RegUsage.end())
5591             RegUsage[ClassID] = 1;
5592           else
5593             RegUsage[ClassID] += 1;
5594         }
5595       } else {
5596         collectUniformsAndScalars(VFs[j]);
5597         for (auto Inst : OpenIntervals) {
5598           // Skip ignored values for VF > 1.
5599           if (VecValuesToIgnore.count(Inst))
5600             continue;
5601           if (isScalarAfterVectorization(Inst, VFs[j])) {
5602             unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
5603             if (RegUsage.find(ClassID) == RegUsage.end())
5604               RegUsage[ClassID] = 1;
5605             else
5606               RegUsage[ClassID] += 1;
5607           } else {
5608             unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
5609             if (RegUsage.find(ClassID) == RegUsage.end())
5610               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
5611             else
5612               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
5613           }
5614         }
5615       }
5616 
5617       for (auto& pair : RegUsage) {
5618         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
5619           MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
5620         else
5621           MaxUsages[j][pair.first] = pair.second;
5622       }
5623     }
5624 
5625     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
5626                       << OpenIntervals.size() << '\n');
5627 
5628     // Add the current instruction to the list of open intervals.
5629     OpenIntervals.insert(I);
5630   }
5631 
5632   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
5633     SmallMapVector<unsigned, unsigned, 4> Invariant;
5634 
5635     for (auto Inst : LoopInvariants) {
5636       unsigned Usage = VFs[i] == 1 ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
5637       unsigned ClassID = TTI.getRegisterClassForType(VFs[i] > 1, Inst->getType());
5638       if (Invariant.find(ClassID) == Invariant.end())
5639         Invariant[ClassID] = Usage;
5640       else
5641         Invariant[ClassID] += Usage;
5642     }
5643 
5644     LLVM_DEBUG({
5645       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
5646       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
5647              << " item\n";
5648       for (const auto &pair : MaxUsages[i]) {
5649         dbgs() << "LV(REG): RegisterClass: "
5650                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
5651                << " registers\n";
5652       }
5653       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
5654              << " item\n";
5655       for (const auto &pair : Invariant) {
5656         dbgs() << "LV(REG): RegisterClass: "
5657                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
5658                << " registers\n";
5659       }
5660     });
5661 
5662     RU.LoopInvariantRegs = Invariant;
5663     RU.MaxLocalUsers = MaxUsages[i];
5664     RUs[i] = RU;
5665   }
5666 
5667   return RUs;
5668 }
5669 
5670 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){
5671   // TODO: Cost model for emulated masked load/store is completely
5672   // broken. This hack guides the cost model to use an artificially
5673   // high enough value to practically disable vectorization with such
5674   // operations, except where previously deployed legality hack allowed
5675   // using very low cost values. This is to avoid regressions coming simply
5676   // from moving "masked load/store" check from legality to cost model.
5677   // Masked Load/Gather emulation was previously never allowed.
5678   // Limited number of Masked Store/Scatter emulation was allowed.
5679   assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
5680   return isa<LoadInst>(I) ||
5681          (isa<StoreInst>(I) &&
5682           NumPredStores > NumberOfStoresToPredicate);
5683 }
5684 
5685 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
5686   // If we aren't vectorizing the loop, or if we've already collected the
5687   // instructions to scalarize, there's nothing to do. Collection may already
5688   // have occurred if we have a user-selected VF and are now computing the
5689   // expected cost for interleaving.
5690   if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end())
5691     return;
5692 
5693   // Initialize a mapping for VF in InstsToScalalarize. If we find that it's
5694   // not profitable to scalarize any instructions, the presence of VF in the
5695   // map will indicate that we've analyzed it already.
5696   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
5697 
5698   // Find all the instructions that are scalar with predication in the loop and
5699   // determine if it would be better to not if-convert the blocks they are in.
5700   // If so, we also record the instructions to scalarize.
5701   for (BasicBlock *BB : TheLoop->blocks()) {
5702     if (!blockNeedsPredication(BB))
5703       continue;
5704     for (Instruction &I : *BB)
5705       if (isScalarWithPredication(&I)) {
5706         ScalarCostsTy ScalarCosts;
5707         // Do not apply discount logic if hacked cost is needed
5708         // for emulated masked memrefs.
5709         if (!useEmulatedMaskMemRefHack(&I) &&
5710             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
5711           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
5712         // Remember that BB will remain after vectorization.
5713         PredicatedBBsAfterVectorization.insert(BB);
5714       }
5715   }
5716 }
5717 
5718 int LoopVectorizationCostModel::computePredInstDiscount(
5719     Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
5720     unsigned VF) {
5721   assert(!isUniformAfterVectorization(PredInst, VF) &&
5722          "Instruction marked uniform-after-vectorization will be predicated");
5723 
5724   // Initialize the discount to zero, meaning that the scalar version and the
5725   // vector version cost the same.
5726   int Discount = 0;
5727 
5728   // Holds instructions to analyze. The instructions we visit are mapped in
5729   // ScalarCosts. Those instructions are the ones that would be scalarized if
5730   // we find that the scalar version costs less.
5731   SmallVector<Instruction *, 8> Worklist;
5732 
5733   // Returns true if the given instruction can be scalarized.
5734   auto canBeScalarized = [&](Instruction *I) -> bool {
5735     // We only attempt to scalarize instructions forming a single-use chain
5736     // from the original predicated block that would otherwise be vectorized.
5737     // Although not strictly necessary, we give up on instructions we know will
5738     // already be scalar to avoid traversing chains that are unlikely to be
5739     // beneficial.
5740     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
5741         isScalarAfterVectorization(I, VF))
5742       return false;
5743 
5744     // If the instruction is scalar with predication, it will be analyzed
5745     // separately. We ignore it within the context of PredInst.
5746     if (isScalarWithPredication(I))
5747       return false;
5748 
5749     // If any of the instruction's operands are uniform after vectorization,
5750     // the instruction cannot be scalarized. This prevents, for example, a
5751     // masked load from being scalarized.
5752     //
5753     // We assume we will only emit a value for lane zero of an instruction
5754     // marked uniform after vectorization, rather than VF identical values.
5755     // Thus, if we scalarize an instruction that uses a uniform, we would
5756     // create uses of values corresponding to the lanes we aren't emitting code
5757     // for. This behavior can be changed by allowing getScalarValue to clone
5758     // the lane zero values for uniforms rather than asserting.
5759     for (Use &U : I->operands())
5760       if (auto *J = dyn_cast<Instruction>(U.get()))
5761         if (isUniformAfterVectorization(J, VF))
5762           return false;
5763 
5764     // Otherwise, we can scalarize the instruction.
5765     return true;
5766   };
5767 
5768   // Compute the expected cost discount from scalarizing the entire expression
5769   // feeding the predicated instruction. We currently only consider expressions
5770   // that are single-use instruction chains.
5771   Worklist.push_back(PredInst);
5772   while (!Worklist.empty()) {
5773     Instruction *I = Worklist.pop_back_val();
5774 
5775     // If we've already analyzed the instruction, there's nothing to do.
5776     if (ScalarCosts.find(I) != ScalarCosts.end())
5777       continue;
5778 
5779     // Compute the cost of the vector instruction. Note that this cost already
5780     // includes the scalarization overhead of the predicated instruction.
5781     unsigned VectorCost = getInstructionCost(I, VF).first;
5782 
5783     // Compute the cost of the scalarized instruction. This cost is the cost of
5784     // the instruction as if it wasn't if-converted and instead remained in the
5785     // predicated block. We will scale this cost by block probability after
5786     // computing the scalarization overhead.
5787     unsigned ScalarCost = VF * getInstructionCost(I, 1).first;
5788 
5789     // Compute the scalarization overhead of needed insertelement instructions
5790     // and phi nodes.
5791     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
5792       ScalarCost += TTI.getScalarizationOverhead(
5793           cast<VectorType>(ToVectorTy(I->getType(), VF)),
5794           APInt::getAllOnesValue(VF), true, false);
5795       ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI,
5796                                             TTI::TCK_RecipThroughput);
5797     }
5798 
5799     // Compute the scalarization overhead of needed extractelement
5800     // instructions. For each of the instruction's operands, if the operand can
5801     // be scalarized, add it to the worklist; otherwise, account for the
5802     // overhead.
5803     for (Use &U : I->operands())
5804       if (auto *J = dyn_cast<Instruction>(U.get())) {
5805         assert(VectorType::isValidElementType(J->getType()) &&
5806                "Instruction has non-scalar type");
5807         if (canBeScalarized(J))
5808           Worklist.push_back(J);
5809         else if (needsExtract(J, VF))
5810           ScalarCost += TTI.getScalarizationOverhead(
5811               cast<VectorType>(ToVectorTy(J->getType(), VF)),
5812               APInt::getAllOnesValue(VF), false, true);
5813       }
5814 
5815     // Scale the total scalar cost by block probability.
5816     ScalarCost /= getReciprocalPredBlockProb();
5817 
5818     // Compute the discount. A non-negative discount means the vector version
5819     // of the instruction costs more, and scalarizing would be beneficial.
5820     Discount += VectorCost - ScalarCost;
5821     ScalarCosts[I] = ScalarCost;
5822   }
5823 
5824   return Discount;
5825 }
5826 
5827 LoopVectorizationCostModel::VectorizationCostTy
5828 LoopVectorizationCostModel::expectedCost(unsigned VF) {
5829   VectorizationCostTy Cost;
5830 
5831   // For each block.
5832   for (BasicBlock *BB : TheLoop->blocks()) {
5833     VectorizationCostTy BlockCost;
5834 
5835     // For each instruction in the old loop.
5836     for (Instruction &I : BB->instructionsWithoutDebug()) {
5837       // Skip ignored values.
5838       if (ValuesToIgnore.count(&I) || (VF > 1 && VecValuesToIgnore.count(&I)))
5839         continue;
5840 
5841       VectorizationCostTy C = getInstructionCost(&I, VF);
5842 
5843       // Check if we should override the cost.
5844       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
5845         C.first = ForceTargetInstructionCost;
5846 
5847       BlockCost.first += C.first;
5848       BlockCost.second |= C.second;
5849       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
5850                         << " for VF " << VF << " For instruction: " << I
5851                         << '\n');
5852     }
5853 
5854     // If we are vectorizing a predicated block, it will have been
5855     // if-converted. This means that the block's instructions (aside from
5856     // stores and instructions that may divide by zero) will now be
5857     // unconditionally executed. For the scalar case, we may not always execute
5858     // the predicated block. Thus, scale the block's cost by the probability of
5859     // executing it.
5860     if (VF == 1 && blockNeedsPredication(BB))
5861       BlockCost.first /= getReciprocalPredBlockProb();
5862 
5863     Cost.first += BlockCost.first;
5864     Cost.second |= BlockCost.second;
5865   }
5866 
5867   return Cost;
5868 }
5869 
5870 /// Gets Address Access SCEV after verifying that the access pattern
5871 /// is loop invariant except the induction variable dependence.
5872 ///
5873 /// This SCEV can be sent to the Target in order to estimate the address
5874 /// calculation cost.
5875 static const SCEV *getAddressAccessSCEV(
5876               Value *Ptr,
5877               LoopVectorizationLegality *Legal,
5878               PredicatedScalarEvolution &PSE,
5879               const Loop *TheLoop) {
5880 
5881   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
5882   if (!Gep)
5883     return nullptr;
5884 
5885   // We are looking for a gep with all loop invariant indices except for one
5886   // which should be an induction variable.
5887   auto SE = PSE.getSE();
5888   unsigned NumOperands = Gep->getNumOperands();
5889   for (unsigned i = 1; i < NumOperands; ++i) {
5890     Value *Opd = Gep->getOperand(i);
5891     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
5892         !Legal->isInductionVariable(Opd))
5893       return nullptr;
5894   }
5895 
5896   // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV.
5897   return PSE.getSCEV(Ptr);
5898 }
5899 
5900 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
5901   return Legal->hasStride(I->getOperand(0)) ||
5902          Legal->hasStride(I->getOperand(1));
5903 }
5904 
5905 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5906                                                                  unsigned VF) {
5907   assert(VF > 1 && "Scalarization cost of instruction implies vectorization.");
5908   Type *ValTy = getMemInstValueType(I);
5909   auto SE = PSE.getSE();
5910 
5911   unsigned AS = getLoadStoreAddressSpace(I);
5912   Value *Ptr = getLoadStorePointerOperand(I);
5913   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
5914 
5915   // Figure out whether the access is strided and get the stride value
5916   // if it's known in compile time
5917   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
5918 
5919   // Get the cost of the scalar memory instruction and address computation.
5920   unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
5921 
5922   // Don't pass *I here, since it is scalar but will actually be part of a
5923   // vectorized loop where the user of it is a vectorized instruction.
5924   const Align Alignment = getLoadStoreAlignment(I);
5925   Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
5926                                    Alignment, AS,
5927                                    TTI::TCK_RecipThroughput);
5928 
5929   // Get the overhead of the extractelement and insertelement instructions
5930   // we might create due to scalarization.
5931   Cost += getScalarizationOverhead(I, VF);
5932 
5933   // If we have a predicated store, it may not be executed for each vector
5934   // lane. Scale the cost by the probability of executing the predicated
5935   // block.
5936   if (isPredicatedInst(I)) {
5937     Cost /= getReciprocalPredBlockProb();
5938 
5939     if (useEmulatedMaskMemRefHack(I))
5940       // Artificially setting to a high enough value to practically disable
5941       // vectorization with such operations.
5942       Cost = 3000000;
5943   }
5944 
5945   return Cost;
5946 }
5947 
5948 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5949                                                              unsigned VF) {
5950   Type *ValTy = getMemInstValueType(I);
5951   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5952   Value *Ptr = getLoadStorePointerOperand(I);
5953   unsigned AS = getLoadStoreAddressSpace(I);
5954   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
5955   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
5956 
5957   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5958          "Stride should be 1 or -1 for consecutive memory access");
5959   const Align Alignment = getLoadStoreAlignment(I);
5960   unsigned Cost = 0;
5961   if (Legal->isMaskRequired(I))
5962     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5963                                       CostKind);
5964   else
5965     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5966                                 CostKind, I);
5967 
5968   bool Reverse = ConsecutiveStride < 0;
5969   if (Reverse)
5970     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5971   return Cost;
5972 }
5973 
5974 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5975                                                          unsigned VF) {
5976   Type *ValTy = getMemInstValueType(I);
5977   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5978   const Align Alignment = getLoadStoreAlignment(I);
5979   unsigned AS = getLoadStoreAddressSpace(I);
5980   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
5981   if (isa<LoadInst>(I)) {
5982     return TTI.getAddressComputationCost(ValTy) +
5983            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
5984                                CostKind) +
5985            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
5986   }
5987   StoreInst *SI = cast<StoreInst>(I);
5988 
5989   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
5990   return TTI.getAddressComputationCost(ValTy) +
5991          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
5992                              CostKind) +
5993          (isLoopInvariantStoreValue
5994               ? 0
5995               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
5996                                        VF - 1));
5997 }
5998 
5999 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6000                                                           unsigned VF) {
6001   Type *ValTy = getMemInstValueType(I);
6002   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6003   const Align Alignment = getLoadStoreAlignment(I);
6004   const Value *Ptr = getLoadStorePointerOperand(I);
6005 
6006   return TTI.getAddressComputationCost(VectorTy) +
6007          TTI.getGatherScatterOpCost(
6008              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6009              TargetTransformInfo::TCK_RecipThroughput, I);
6010 }
6011 
6012 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6013                                                             unsigned VF) {
6014   Type *ValTy = getMemInstValueType(I);
6015   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6016   unsigned AS = getLoadStoreAddressSpace(I);
6017 
6018   auto Group = getInterleavedAccessGroup(I);
6019   assert(Group && "Fail to get an interleaved access group.");
6020 
6021   unsigned InterleaveFactor = Group->getFactor();
6022   auto *WideVecTy = FixedVectorType::get(ValTy, VF * InterleaveFactor);
6023 
6024   // Holds the indices of existing members in an interleaved load group.
6025   // An interleaved store group doesn't need this as it doesn't allow gaps.
6026   SmallVector<unsigned, 4> Indices;
6027   if (isa<LoadInst>(I)) {
6028     for (unsigned i = 0; i < InterleaveFactor; i++)
6029       if (Group->getMember(i))
6030         Indices.push_back(i);
6031   }
6032 
6033   // Calculate the cost of the whole interleaved group.
6034   bool UseMaskForGaps =
6035       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
6036   unsigned Cost = TTI.getInterleavedMemoryOpCost(
6037       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6038       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6039 
6040   if (Group->isReverse()) {
6041     // TODO: Add support for reversed masked interleaved access.
6042     assert(!Legal->isMaskRequired(I) &&
6043            "Reverse masked interleaved access not supported.");
6044     Cost += Group->getNumMembers() *
6045             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
6046   }
6047   return Cost;
6048 }
6049 
6050 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
6051                                                               unsigned VF) {
6052   // Calculate scalar cost only. Vectorization cost should be ready at this
6053   // moment.
6054   if (VF == 1) {
6055     Type *ValTy = getMemInstValueType(I);
6056     const Align Alignment = getLoadStoreAlignment(I);
6057     unsigned AS = getLoadStoreAddressSpace(I);
6058 
6059     return TTI.getAddressComputationCost(ValTy) +
6060            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
6061                                TTI::TCK_RecipThroughput, I);
6062   }
6063   return getWideningCost(I, VF);
6064 }
6065 
6066 LoopVectorizationCostModel::VectorizationCostTy
6067 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
6068   // If we know that this instruction will remain uniform, check the cost of
6069   // the scalar version.
6070   if (isUniformAfterVectorization(I, VF))
6071     VF = 1;
6072 
6073   if (VF > 1 && isProfitableToScalarize(I, VF))
6074     return VectorizationCostTy(InstsToScalarize[VF][I], false);
6075 
6076   // Forced scalars do not have any scalarization overhead.
6077   auto ForcedScalar = ForcedScalars.find(VF);
6078   if (VF > 1 && ForcedScalar != ForcedScalars.end()) {
6079     auto InstSet = ForcedScalar->second;
6080     if (InstSet.count(I))
6081       return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false);
6082   }
6083 
6084   Type *VectorTy;
6085   unsigned C = getInstructionCost(I, VF, VectorTy);
6086 
6087   bool TypeNotScalarized =
6088       VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF;
6089   return VectorizationCostTy(C, TypeNotScalarized);
6090 }
6091 
6092 unsigned LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
6093                                                               unsigned VF) {
6094 
6095   if (VF == 1)
6096     return 0;
6097 
6098   unsigned Cost = 0;
6099   Type *RetTy = ToVectorTy(I->getType(), VF);
6100   if (!RetTy->isVoidTy() &&
6101       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
6102     Cost += TTI.getScalarizationOverhead(
6103         cast<VectorType>(RetTy), APInt::getAllOnesValue(VF), true, false);
6104 
6105   // Some targets keep addresses scalar.
6106   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
6107     return Cost;
6108 
6109   // Some targets support efficient element stores.
6110   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
6111     return Cost;
6112 
6113   // Collect operands to consider.
6114   CallInst *CI = dyn_cast<CallInst>(I);
6115   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
6116 
6117   // Skip operands that do not require extraction/scalarization and do not incur
6118   // any overhead.
6119   return Cost + TTI.getOperandsScalarizationOverhead(
6120                     filterExtractingOperands(Ops, VF), VF);
6121 }
6122 
6123 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
6124   if (VF == 1)
6125     return;
6126   NumPredStores = 0;
6127   for (BasicBlock *BB : TheLoop->blocks()) {
6128     // For each instruction in the old loop.
6129     for (Instruction &I : *BB) {
6130       Value *Ptr =  getLoadStorePointerOperand(&I);
6131       if (!Ptr)
6132         continue;
6133 
6134       // TODO: We should generate better code and update the cost model for
6135       // predicated uniform stores. Today they are treated as any other
6136       // predicated store (see added test cases in
6137       // invariant-store-vectorization.ll).
6138       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
6139         NumPredStores++;
6140 
6141       if (Legal->isUniform(Ptr) &&
6142           // Conditional loads and stores should be scalarized and predicated.
6143           // isScalarWithPredication cannot be used here since masked
6144           // gather/scatters are not considered scalar with predication.
6145           !Legal->blockNeedsPredication(I.getParent())) {
6146         // TODO: Avoid replicating loads and stores instead of
6147         // relying on instcombine to remove them.
6148         // Load: Scalar load + broadcast
6149         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
6150         unsigned Cost = getUniformMemOpCost(&I, VF);
6151         setWideningDecision(&I, VF, CM_Scalarize, Cost);
6152         continue;
6153       }
6154 
6155       // We assume that widening is the best solution when possible.
6156       if (memoryInstructionCanBeWidened(&I, VF)) {
6157         unsigned Cost = getConsecutiveMemOpCost(&I, VF);
6158         int ConsecutiveStride =
6159                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
6160         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6161                "Expected consecutive stride.");
6162         InstWidening Decision =
6163             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
6164         setWideningDecision(&I, VF, Decision, Cost);
6165         continue;
6166       }
6167 
6168       // Choose between Interleaving, Gather/Scatter or Scalarization.
6169       unsigned InterleaveCost = std::numeric_limits<unsigned>::max();
6170       unsigned NumAccesses = 1;
6171       if (isAccessInterleaved(&I)) {
6172         auto Group = getInterleavedAccessGroup(&I);
6173         assert(Group && "Fail to get an interleaved access group.");
6174 
6175         // Make one decision for the whole group.
6176         if (getWideningDecision(&I, VF) != CM_Unknown)
6177           continue;
6178 
6179         NumAccesses = Group->getNumMembers();
6180         if (interleavedAccessCanBeWidened(&I, VF))
6181           InterleaveCost = getInterleaveGroupCost(&I, VF);
6182       }
6183 
6184       unsigned GatherScatterCost =
6185           isLegalGatherOrScatter(&I)
6186               ? getGatherScatterCost(&I, VF) * NumAccesses
6187               : std::numeric_limits<unsigned>::max();
6188 
6189       unsigned ScalarizationCost =
6190           getMemInstScalarizationCost(&I, VF) * NumAccesses;
6191 
6192       // Choose better solution for the current VF,
6193       // write down this decision and use it during vectorization.
6194       unsigned Cost;
6195       InstWidening Decision;
6196       if (InterleaveCost <= GatherScatterCost &&
6197           InterleaveCost < ScalarizationCost) {
6198         Decision = CM_Interleave;
6199         Cost = InterleaveCost;
6200       } else if (GatherScatterCost < ScalarizationCost) {
6201         Decision = CM_GatherScatter;
6202         Cost = GatherScatterCost;
6203       } else {
6204         Decision = CM_Scalarize;
6205         Cost = ScalarizationCost;
6206       }
6207       // If the instructions belongs to an interleave group, the whole group
6208       // receives the same decision. The whole group receives the cost, but
6209       // the cost will actually be assigned to one instruction.
6210       if (auto Group = getInterleavedAccessGroup(&I))
6211         setWideningDecision(Group, VF, Decision, Cost);
6212       else
6213         setWideningDecision(&I, VF, Decision, Cost);
6214     }
6215   }
6216 
6217   // Make sure that any load of address and any other address computation
6218   // remains scalar unless there is gather/scatter support. This avoids
6219   // inevitable extracts into address registers, and also has the benefit of
6220   // activating LSR more, since that pass can't optimize vectorized
6221   // addresses.
6222   if (TTI.prefersVectorizedAddressing())
6223     return;
6224 
6225   // Start with all scalar pointer uses.
6226   SmallPtrSet<Instruction *, 8> AddrDefs;
6227   for (BasicBlock *BB : TheLoop->blocks())
6228     for (Instruction &I : *BB) {
6229       Instruction *PtrDef =
6230         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
6231       if (PtrDef && TheLoop->contains(PtrDef) &&
6232           getWideningDecision(&I, VF) != CM_GatherScatter)
6233         AddrDefs.insert(PtrDef);
6234     }
6235 
6236   // Add all instructions used to generate the addresses.
6237   SmallVector<Instruction *, 4> Worklist;
6238   for (auto *I : AddrDefs)
6239     Worklist.push_back(I);
6240   while (!Worklist.empty()) {
6241     Instruction *I = Worklist.pop_back_val();
6242     for (auto &Op : I->operands())
6243       if (auto *InstOp = dyn_cast<Instruction>(Op))
6244         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
6245             AddrDefs.insert(InstOp).second)
6246           Worklist.push_back(InstOp);
6247   }
6248 
6249   for (auto *I : AddrDefs) {
6250     if (isa<LoadInst>(I)) {
6251       // Setting the desired widening decision should ideally be handled in
6252       // by cost functions, but since this involves the task of finding out
6253       // if the loaded register is involved in an address computation, it is
6254       // instead changed here when we know this is the case.
6255       InstWidening Decision = getWideningDecision(I, VF);
6256       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
6257         // Scalarize a widened load of address.
6258         setWideningDecision(I, VF, CM_Scalarize,
6259                             (VF * getMemoryInstructionCost(I, 1)));
6260       else if (auto Group = getInterleavedAccessGroup(I)) {
6261         // Scalarize an interleave group of address loads.
6262         for (unsigned I = 0; I < Group->getFactor(); ++I) {
6263           if (Instruction *Member = Group->getMember(I))
6264             setWideningDecision(Member, VF, CM_Scalarize,
6265                                 (VF * getMemoryInstructionCost(Member, 1)));
6266         }
6267       }
6268     } else
6269       // Make sure I gets scalarized and a cost estimate without
6270       // scalarization overhead.
6271       ForcedScalars[VF].insert(I);
6272   }
6273 }
6274 
6275 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6276                                                         unsigned VF,
6277                                                         Type *&VectorTy) {
6278   Type *RetTy = I->getType();
6279   if (canTruncateToMinimalBitwidth(I, VF))
6280     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6281   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
6282   auto SE = PSE.getSE();
6283   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6284 
6285   // TODO: We need to estimate the cost of intrinsic calls.
6286   switch (I->getOpcode()) {
6287   case Instruction::GetElementPtr:
6288     // We mark this instruction as zero-cost because the cost of GEPs in
6289     // vectorized code depends on whether the corresponding memory instruction
6290     // is scalarized or not. Therefore, we handle GEPs with the memory
6291     // instruction cost.
6292     return 0;
6293   case Instruction::Br: {
6294     // In cases of scalarized and predicated instructions, there will be VF
6295     // predicated blocks in the vectorized loop. Each branch around these
6296     // blocks requires also an extract of its vector compare i1 element.
6297     bool ScalarPredicatedBB = false;
6298     BranchInst *BI = cast<BranchInst>(I);
6299     if (VF > 1 && BI->isConditional() &&
6300         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
6301          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
6302       ScalarPredicatedBB = true;
6303 
6304     if (ScalarPredicatedBB) {
6305       // Return cost for branches around scalarized and predicated blocks.
6306       auto *Vec_i1Ty =
6307           FixedVectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
6308       return (TTI.getScalarizationOverhead(Vec_i1Ty, APInt::getAllOnesValue(VF),
6309                                            false, true) +
6310               (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF));
6311     } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1)
6312       // The back-edge branch will remain, as will all scalar branches.
6313       return TTI.getCFInstrCost(Instruction::Br, CostKind);
6314     else
6315       // This branch will be eliminated by if-conversion.
6316       return 0;
6317     // Note: We currently assume zero cost for an unconditional branch inside
6318     // a predicated block since it will become a fall-through, although we
6319     // may decide in the future to call TTI for all branches.
6320   }
6321   case Instruction::PHI: {
6322     auto *Phi = cast<PHINode>(I);
6323 
6324     // First-order recurrences are replaced by vector shuffles inside the loop.
6325     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
6326     if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
6327       return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
6328                                 cast<VectorType>(VectorTy), VF - 1,
6329                                 FixedVectorType::get(RetTy, 1));
6330 
6331     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
6332     // converted into select instructions. We require N - 1 selects per phi
6333     // node, where N is the number of incoming values.
6334     if (VF > 1 && Phi->getParent() != TheLoop->getHeader())
6335       return (Phi->getNumIncomingValues() - 1) *
6336              TTI.getCmpSelInstrCost(
6337                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
6338                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
6339                  CostKind);
6340 
6341     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
6342   }
6343   case Instruction::UDiv:
6344   case Instruction::SDiv:
6345   case Instruction::URem:
6346   case Instruction::SRem:
6347     // If we have a predicated instruction, it may not be executed for each
6348     // vector lane. Get the scalarization cost and scale this amount by the
6349     // probability of executing the predicated block. If the instruction is not
6350     // predicated, we fall through to the next case.
6351     if (VF > 1 && isScalarWithPredication(I)) {
6352       unsigned Cost = 0;
6353 
6354       // These instructions have a non-void type, so account for the phi nodes
6355       // that we will create. This cost is likely to be zero. The phi node
6356       // cost, if any, should be scaled by the block probability because it
6357       // models a copy at the end of each predicated block.
6358       Cost += VF * TTI.getCFInstrCost(Instruction::PHI, CostKind);
6359 
6360       // The cost of the non-predicated instruction.
6361       Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
6362 
6363       // The cost of insertelement and extractelement instructions needed for
6364       // scalarization.
6365       Cost += getScalarizationOverhead(I, VF);
6366 
6367       // Scale the cost by the probability of executing the predicated blocks.
6368       // This assumes the predicated block for each vector lane is equally
6369       // likely.
6370       return Cost / getReciprocalPredBlockProb();
6371     }
6372     LLVM_FALLTHROUGH;
6373   case Instruction::Add:
6374   case Instruction::FAdd:
6375   case Instruction::Sub:
6376   case Instruction::FSub:
6377   case Instruction::Mul:
6378   case Instruction::FMul:
6379   case Instruction::FDiv:
6380   case Instruction::FRem:
6381   case Instruction::Shl:
6382   case Instruction::LShr:
6383   case Instruction::AShr:
6384   case Instruction::And:
6385   case Instruction::Or:
6386   case Instruction::Xor: {
6387     // Since we will replace the stride by 1 the multiplication should go away.
6388     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
6389       return 0;
6390     // Certain instructions can be cheaper to vectorize if they have a constant
6391     // second vector operand. One example of this are shifts on x86.
6392     Value *Op2 = I->getOperand(1);
6393     TargetTransformInfo::OperandValueProperties Op2VP;
6394     TargetTransformInfo::OperandValueKind Op2VK =
6395         TTI.getOperandInfo(Op2, Op2VP);
6396     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
6397       Op2VK = TargetTransformInfo::OK_UniformValue;
6398 
6399     SmallVector<const Value *, 4> Operands(I->operand_values());
6400     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6401     return N * TTI.getArithmeticInstrCost(
6402                    I->getOpcode(), VectorTy, CostKind,
6403                    TargetTransformInfo::OK_AnyValue,
6404                    Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
6405   }
6406   case Instruction::FNeg: {
6407     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6408     return N * TTI.getArithmeticInstrCost(
6409                    I->getOpcode(), VectorTy, CostKind,
6410                    TargetTransformInfo::OK_AnyValue,
6411                    TargetTransformInfo::OK_AnyValue,
6412                    TargetTransformInfo::OP_None, TargetTransformInfo::OP_None,
6413                    I->getOperand(0), I);
6414   }
6415   case Instruction::Select: {
6416     SelectInst *SI = cast<SelectInst>(I);
6417     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6418     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6419     Type *CondTy = SI->getCondition()->getType();
6420     if (!ScalarCond)
6421       CondTy = FixedVectorType::get(CondTy, VF);
6422 
6423     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
6424                                   CostKind, I);
6425   }
6426   case Instruction::ICmp:
6427   case Instruction::FCmp: {
6428     Type *ValTy = I->getOperand(0)->getType();
6429     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6430     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6431       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
6432     VectorTy = ToVectorTy(ValTy, VF);
6433     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, CostKind,
6434                                   I);
6435   }
6436   case Instruction::Store:
6437   case Instruction::Load: {
6438     unsigned Width = VF;
6439     if (Width > 1) {
6440       InstWidening Decision = getWideningDecision(I, Width);
6441       assert(Decision != CM_Unknown &&
6442              "CM decision should be taken at this point");
6443       if (Decision == CM_Scalarize)
6444         Width = 1;
6445     }
6446     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
6447     return getMemoryInstructionCost(I, VF);
6448   }
6449   case Instruction::ZExt:
6450   case Instruction::SExt:
6451   case Instruction::FPToUI:
6452   case Instruction::FPToSI:
6453   case Instruction::FPExt:
6454   case Instruction::PtrToInt:
6455   case Instruction::IntToPtr:
6456   case Instruction::SIToFP:
6457   case Instruction::UIToFP:
6458   case Instruction::Trunc:
6459   case Instruction::FPTrunc:
6460   case Instruction::BitCast: {
6461     // We optimize the truncation of induction variables having constant
6462     // integer steps. The cost of these truncations is the same as the scalar
6463     // operation.
6464     if (isOptimizableIVTruncate(I, VF)) {
6465       auto *Trunc = cast<TruncInst>(I);
6466       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6467                                   Trunc->getSrcTy(), CostKind, Trunc);
6468     }
6469 
6470     Type *SrcScalarTy = I->getOperand(0)->getType();
6471     Type *SrcVecTy =
6472         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6473     if (canTruncateToMinimalBitwidth(I, VF)) {
6474       // This cast is going to be shrunk. This may remove the cast or it might
6475       // turn it into slightly different cast. For example, if MinBW == 16,
6476       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
6477       //
6478       // Calculate the modified src and dest types.
6479       Type *MinVecTy = VectorTy;
6480       if (I->getOpcode() == Instruction::Trunc) {
6481         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
6482         VectorTy =
6483             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6484       } else if (I->getOpcode() == Instruction::ZExt ||
6485                  I->getOpcode() == Instruction::SExt) {
6486         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
6487         VectorTy =
6488             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6489       }
6490     }
6491 
6492     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6493     return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy,
6494                                     CostKind, I);
6495   }
6496   case Instruction::Call: {
6497     bool NeedToScalarize;
6498     CallInst *CI = cast<CallInst>(I);
6499     unsigned CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
6500     if (getVectorIntrinsicIDForCall(CI, TLI))
6501       return std::min(CallCost, getVectorIntrinsicCost(CI, VF));
6502     return CallCost;
6503   }
6504   default:
6505     // The cost of executing VF copies of the scalar instruction. This opcode
6506     // is unknown. Assume that it is the same as 'mul'.
6507     return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy,
6508                                            CostKind) +
6509            getScalarizationOverhead(I, VF);
6510   } // end of switch.
6511 }
6512 
6513 char LoopVectorize::ID = 0;
6514 
6515 static const char lv_name[] = "Loop Vectorization";
6516 
6517 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
6518 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
6519 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
6520 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
6521 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
6522 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
6523 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
6524 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
6525 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
6526 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
6527 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
6528 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
6529 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
6530 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
6531 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
6532 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
6533 
6534 namespace llvm {
6535 
6536 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
6537 
6538 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
6539                               bool VectorizeOnlyWhenForced) {
6540   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
6541 }
6542 
6543 } // end namespace llvm
6544 
6545 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
6546   // Check if the pointer operand of a load or store instruction is
6547   // consecutive.
6548   if (auto *Ptr = getLoadStorePointerOperand(Inst))
6549     return Legal->isConsecutivePtr(Ptr);
6550   return false;
6551 }
6552 
6553 void LoopVectorizationCostModel::collectValuesToIgnore() {
6554   // Ignore ephemeral values.
6555   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
6556 
6557   // Ignore type-promoting instructions we identified during reduction
6558   // detection.
6559   for (auto &Reduction : Legal->getReductionVars()) {
6560     RecurrenceDescriptor &RedDes = Reduction.second;
6561     SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6562     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6563   }
6564   // Ignore type-casting instructions we identified during induction
6565   // detection.
6566   for (auto &Induction : Legal->getInductionVars()) {
6567     InductionDescriptor &IndDes = Induction.second;
6568     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6569     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6570   }
6571 }
6572 
6573 // TODO: we could return a pair of values that specify the max VF and
6574 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
6575 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
6576 // doesn't have a cost model that can choose which plan to execute if
6577 // more than one is generated.
6578 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
6579                                  LoopVectorizationCostModel &CM) {
6580   unsigned WidestType;
6581   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
6582   return WidestVectorRegBits / WidestType;
6583 }
6584 
6585 VectorizationFactor
6586 LoopVectorizationPlanner::planInVPlanNativePath(unsigned UserVF) {
6587   unsigned VF = UserVF;
6588   // Outer loop handling: They may require CFG and instruction level
6589   // transformations before even evaluating whether vectorization is profitable.
6590   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6591   // the vectorization pipeline.
6592   if (!OrigLoop->empty()) {
6593     // If the user doesn't provide a vectorization factor, determine a
6594     // reasonable one.
6595     if (!UserVF) {
6596       VF = determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM);
6597       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
6598 
6599       // Make sure we have a VF > 1 for stress testing.
6600       if (VPlanBuildStressTest && VF < 2) {
6601         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
6602                           << "overriding computed VF.\n");
6603         VF = 4;
6604       }
6605     }
6606     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6607     assert(isPowerOf2_32(VF) && "VF needs to be a power of two");
6608     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVF ? "user " : "") << "VF " << VF
6609                       << " to build VPlans.\n");
6610     buildVPlans(VF, VF);
6611 
6612     // For VPlan build stress testing, we bail out after VPlan construction.
6613     if (VPlanBuildStressTest)
6614       return VectorizationFactor::Disabled();
6615 
6616     return {VF, 0};
6617   }
6618 
6619   LLVM_DEBUG(
6620       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6621                 "VPlan-native path.\n");
6622   return VectorizationFactor::Disabled();
6623 }
6624 
6625 Optional<VectorizationFactor> LoopVectorizationPlanner::plan(unsigned UserVF,
6626                                                              unsigned UserIC) {
6627   assert(OrigLoop->empty() && "Inner loop expected.");
6628   Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC);
6629   if (!MaybeMaxVF) // Cases that should not to be vectorized nor interleaved.
6630     return None;
6631 
6632   // Invalidate interleave groups if all blocks of loop will be predicated.
6633   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
6634       !useMaskedInterleavedAccesses(*TTI)) {
6635     LLVM_DEBUG(
6636         dbgs()
6637         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6638            "which requires masked-interleaved support.\n");
6639     if (CM.InterleaveInfo.invalidateGroups())
6640       // Invalidating interleave groups also requires invalidating all decisions
6641       // based on them, which includes widening decisions and uniform and scalar
6642       // values.
6643       CM.invalidateCostModelingDecisions();
6644   }
6645 
6646   if (UserVF) {
6647     LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6648     assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
6649     // Collect the instructions (and their associated costs) that will be more
6650     // profitable to scalarize.
6651     CM.selectUserVectorizationFactor(UserVF);
6652     buildVPlansWithVPRecipes(UserVF, UserVF);
6653     LLVM_DEBUG(printPlans(dbgs()));
6654     return {{UserVF, 0}};
6655   }
6656 
6657   unsigned MaxVF = MaybeMaxVF.getValue();
6658   assert(MaxVF != 0 && "MaxVF is zero.");
6659 
6660   for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
6661     // Collect Uniform and Scalar instructions after vectorization with VF.
6662     CM.collectUniformsAndScalars(VF);
6663 
6664     // Collect the instructions (and their associated costs) that will be more
6665     // profitable to scalarize.
6666     if (VF > 1)
6667       CM.collectInstsToScalarize(VF);
6668   }
6669 
6670   buildVPlansWithVPRecipes(1, MaxVF);
6671   LLVM_DEBUG(printPlans(dbgs()));
6672   if (MaxVF == 1)
6673     return VectorizationFactor::Disabled();
6674 
6675   // Select the optimal vectorization factor.
6676   return CM.selectVectorizationFactor(MaxVF);
6677 }
6678 
6679 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
6680   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
6681                     << '\n');
6682   BestVF = VF;
6683   BestUF = UF;
6684 
6685   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
6686     return !Plan->hasVF(VF);
6687   });
6688   assert(VPlans.size() == 1 && "Best VF has not a single VPlan.");
6689 }
6690 
6691 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
6692                                            DominatorTree *DT) {
6693   // Perform the actual loop transformation.
6694 
6695   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
6696   VPCallbackILV CallbackILV(ILV);
6697 
6698   VPTransformState State{BestVF, BestUF,      LI,
6699                          DT,     ILV.Builder, ILV.VectorLoopValueMap,
6700                          &ILV,   CallbackILV};
6701   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
6702   State.TripCount = ILV.getOrCreateTripCount(nullptr);
6703   State.CanonicalIV = ILV.Induction;
6704 
6705   //===------------------------------------------------===//
6706   //
6707   // Notice: any optimization or new instruction that go
6708   // into the code below should also be implemented in
6709   // the cost-model.
6710   //
6711   //===------------------------------------------------===//
6712 
6713   // 2. Copy and widen instructions from the old loop into the new loop.
6714   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
6715   VPlans.front()->execute(&State);
6716 
6717   // 3. Fix the vectorized code: take care of header phi's, live-outs,
6718   //    predication, updating analyses.
6719   ILV.fixVectorizedLoop();
6720 }
6721 
6722 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
6723     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
6724   BasicBlock *Latch = OrigLoop->getLoopLatch();
6725 
6726   // We create new control-flow for the vectorized loop, so the original
6727   // condition will be dead after vectorization if it's only used by the
6728   // branch.
6729   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
6730   if (Cmp && Cmp->hasOneUse())
6731     DeadInstructions.insert(Cmp);
6732 
6733   // We create new "steps" for induction variable updates to which the original
6734   // induction variables map. An original update instruction will be dead if
6735   // all its users except the induction variable are dead.
6736   for (auto &Induction : Legal->getInductionVars()) {
6737     PHINode *Ind = Induction.first;
6738     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
6739     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
6740           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
6741         }))
6742       DeadInstructions.insert(IndUpdate);
6743 
6744     // We record as "Dead" also the type-casting instructions we had identified
6745     // during induction analysis. We don't need any handling for them in the
6746     // vectorized loop because we have proven that, under a proper runtime
6747     // test guarding the vectorized loop, the value of the phi, and the casted
6748     // value of the phi, are the same. The last instruction in this casting chain
6749     // will get its scalar/vector/widened def from the scalar/vector/widened def
6750     // of the respective phi node. Any other casts in the induction def-use chain
6751     // have no other uses outside the phi update chain, and will be ignored.
6752     InductionDescriptor &IndDes = Induction.second;
6753     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6754     DeadInstructions.insert(Casts.begin(), Casts.end());
6755   }
6756 }
6757 
6758 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
6759 
6760 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
6761 
6762 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
6763                                         Instruction::BinaryOps BinOp) {
6764   // When unrolling and the VF is 1, we only need to add a simple scalar.
6765   Type *Ty = Val->getType();
6766   assert(!Ty->isVectorTy() && "Val must be a scalar");
6767 
6768   if (Ty->isFloatingPointTy()) {
6769     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
6770 
6771     // Floating point operations had to be 'fast' to enable the unrolling.
6772     Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
6773     return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
6774   }
6775   Constant *C = ConstantInt::get(Ty, StartIdx);
6776   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
6777 }
6778 
6779 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
6780   SmallVector<Metadata *, 4> MDs;
6781   // Reserve first location for self reference to the LoopID metadata node.
6782   MDs.push_back(nullptr);
6783   bool IsUnrollMetadata = false;
6784   MDNode *LoopID = L->getLoopID();
6785   if (LoopID) {
6786     // First find existing loop unrolling disable metadata.
6787     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
6788       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
6789       if (MD) {
6790         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
6791         IsUnrollMetadata =
6792             S && S->getString().startswith("llvm.loop.unroll.disable");
6793       }
6794       MDs.push_back(LoopID->getOperand(i));
6795     }
6796   }
6797 
6798   if (!IsUnrollMetadata) {
6799     // Add runtime unroll disable metadata.
6800     LLVMContext &Context = L->getHeader()->getContext();
6801     SmallVector<Metadata *, 1> DisableOperands;
6802     DisableOperands.push_back(
6803         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
6804     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
6805     MDs.push_back(DisableNode);
6806     MDNode *NewLoopID = MDNode::get(Context, MDs);
6807     // Set operand 0 to refer to the loop id itself.
6808     NewLoopID->replaceOperandWith(0, NewLoopID);
6809     L->setLoopID(NewLoopID);
6810   }
6811 }
6812 
6813 bool LoopVectorizationPlanner::getDecisionAndClampRange(
6814     const std::function<bool(unsigned)> &Predicate, VFRange &Range) {
6815   assert(Range.End > Range.Start && "Trying to test an empty VF range.");
6816   bool PredicateAtRangeStart = Predicate(Range.Start);
6817 
6818   for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2)
6819     if (Predicate(TmpVF) != PredicateAtRangeStart) {
6820       Range.End = TmpVF;
6821       break;
6822     }
6823 
6824   return PredicateAtRangeStart;
6825 }
6826 
6827 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
6828 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
6829 /// of VF's starting at a given VF and extending it as much as possible. Each
6830 /// vectorization decision can potentially shorten this sub-range during
6831 /// buildVPlan().
6832 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) {
6833   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
6834     VFRange SubRange = {VF, MaxVF + 1};
6835     VPlans.push_back(buildVPlan(SubRange));
6836     VF = SubRange.End;
6837   }
6838 }
6839 
6840 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
6841                                          VPlanPtr &Plan) {
6842   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
6843 
6844   // Look for cached value.
6845   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
6846   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
6847   if (ECEntryIt != EdgeMaskCache.end())
6848     return ECEntryIt->second;
6849 
6850   VPValue *SrcMask = createBlockInMask(Src, Plan);
6851 
6852   // The terminator has to be a branch inst!
6853   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
6854   assert(BI && "Unexpected terminator found");
6855 
6856   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
6857     return EdgeMaskCache[Edge] = SrcMask;
6858 
6859   VPValue *EdgeMask = Plan->getVPValue(BI->getCondition());
6860   assert(EdgeMask && "No Edge Mask found for condition");
6861 
6862   if (BI->getSuccessor(0) != Dst)
6863     EdgeMask = Builder.createNot(EdgeMask);
6864 
6865   if (SrcMask) // Otherwise block in-mask is all-one, no need to AND.
6866     EdgeMask = Builder.createAnd(EdgeMask, SrcMask);
6867 
6868   return EdgeMaskCache[Edge] = EdgeMask;
6869 }
6870 
6871 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
6872   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
6873 
6874   // Look for cached value.
6875   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
6876   if (BCEntryIt != BlockMaskCache.end())
6877     return BCEntryIt->second;
6878 
6879   // All-one mask is modelled as no-mask following the convention for masked
6880   // load/store/gather/scatter. Initialize BlockMask to no-mask.
6881   VPValue *BlockMask = nullptr;
6882 
6883   if (OrigLoop->getHeader() == BB) {
6884     if (!CM.blockNeedsPredication(BB))
6885       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
6886 
6887     // Introduce the early-exit compare IV <= BTC to form header block mask.
6888     // This is used instead of IV < TC because TC may wrap, unlike BTC.
6889     // Start by constructing the desired canonical IV.
6890     VPValue *IV = nullptr;
6891     if (Legal->getPrimaryInduction())
6892       IV = Plan->getVPValue(Legal->getPrimaryInduction());
6893     else {
6894       auto IVRecipe = new VPWidenCanonicalIVRecipe();
6895       Builder.getInsertBlock()->appendRecipe(IVRecipe);
6896       IV = IVRecipe->getVPValue();
6897     }
6898     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
6899     bool TailFolded = !CM.isScalarEpilogueAllowed();
6900     if (TailFolded && CM.TTI.emitGetActiveLaneMask())
6901       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, BTC});
6902     else
6903       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
6904     return BlockMaskCache[BB] = BlockMask;
6905   }
6906 
6907   // This is the block mask. We OR all incoming edges.
6908   for (auto *Predecessor : predecessors(BB)) {
6909     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
6910     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
6911       return BlockMaskCache[BB] = EdgeMask;
6912 
6913     if (!BlockMask) { // BlockMask has its initialized nullptr value.
6914       BlockMask = EdgeMask;
6915       continue;
6916     }
6917 
6918     BlockMask = Builder.createOr(BlockMask, EdgeMask);
6919   }
6920 
6921   return BlockMaskCache[BB] = BlockMask;
6922 }
6923 
6924 VPWidenMemoryInstructionRecipe *
6925 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range,
6926                                   VPlanPtr &Plan) {
6927   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
6928          "Must be called with either a load or store");
6929 
6930   auto willWiden = [&](unsigned VF) -> bool {
6931     if (VF == 1)
6932       return false;
6933     LoopVectorizationCostModel::InstWidening Decision =
6934         CM.getWideningDecision(I, VF);
6935     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
6936            "CM decision should be taken at this point.");
6937     if (Decision == LoopVectorizationCostModel::CM_Interleave)
6938       return true;
6939     if (CM.isScalarAfterVectorization(I, VF) ||
6940         CM.isProfitableToScalarize(I, VF))
6941       return false;
6942     return Decision != LoopVectorizationCostModel::CM_Scalarize;
6943   };
6944 
6945   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
6946     return nullptr;
6947 
6948   VPValue *Mask = nullptr;
6949   if (Legal->isMaskRequired(I))
6950     Mask = createBlockInMask(I->getParent(), Plan);
6951 
6952   VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I));
6953   if (LoadInst *Load = dyn_cast<LoadInst>(I))
6954     return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask);
6955 
6956   StoreInst *Store = cast<StoreInst>(I);
6957   VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand());
6958   return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask);
6959 }
6960 
6961 VPWidenIntOrFpInductionRecipe *
6962 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi) const {
6963   // Check if this is an integer or fp induction. If so, build the recipe that
6964   // produces its scalar and vector values.
6965   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
6966   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
6967       II.getKind() == InductionDescriptor::IK_FpInduction)
6968     return new VPWidenIntOrFpInductionRecipe(Phi);
6969 
6970   return nullptr;
6971 }
6972 
6973 VPWidenIntOrFpInductionRecipe *
6974 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I,
6975                                                 VFRange &Range) const {
6976   // Optimize the special case where the source is a constant integer
6977   // induction variable. Notice that we can only optimize the 'trunc' case
6978   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
6979   // (c) other casts depend on pointer size.
6980 
6981   // Determine whether \p K is a truncation based on an induction variable that
6982   // can be optimized.
6983   auto isOptimizableIVTruncate =
6984       [&](Instruction *K) -> std::function<bool(unsigned)> {
6985     return
6986         [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); };
6987   };
6988 
6989   if (LoopVectorizationPlanner::getDecisionAndClampRange(
6990           isOptimizableIVTruncate(I), Range))
6991     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
6992                                              I);
6993   return nullptr;
6994 }
6995 
6996 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) {
6997   // We know that all PHIs in non-header blocks are converted into selects, so
6998   // we don't have to worry about the insertion order and we can just use the
6999   // builder. At this point we generate the predication tree. There may be
7000   // duplications since this is a simple recursive scan, but future
7001   // optimizations will clean it up.
7002 
7003   SmallVector<VPValue *, 2> Operands;
7004   unsigned NumIncoming = Phi->getNumIncomingValues();
7005   for (unsigned In = 0; In < NumIncoming; In++) {
7006     VPValue *EdgeMask =
7007       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
7008     assert((EdgeMask || NumIncoming == 1) &&
7009            "Multiple predecessors with one having a full mask");
7010     Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In)));
7011     if (EdgeMask)
7012       Operands.push_back(EdgeMask);
7013   }
7014   return new VPBlendRecipe(Phi, Operands);
7015 }
7016 
7017 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range,
7018                                                    VPlan &Plan) const {
7019 
7020   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
7021       [this, CI](unsigned VF) { return CM.isScalarWithPredication(CI, VF); },
7022       Range);
7023 
7024   if (IsPredicated)
7025     return nullptr;
7026 
7027   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
7028   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
7029              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect))
7030     return nullptr;
7031 
7032   auto willWiden = [&](unsigned VF) -> bool {
7033     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
7034     // The following case may be scalarized depending on the VF.
7035     // The flag shows whether we use Intrinsic or a usual Call for vectorized
7036     // version of the instruction.
7037     // Is it beneficial to perform intrinsic call compared to lib call?
7038     bool NeedToScalarize = false;
7039     unsigned CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
7040     bool UseVectorIntrinsic =
7041         ID && CM.getVectorIntrinsicCost(CI, VF) <= CallCost;
7042     return UseVectorIntrinsic || !NeedToScalarize;
7043   };
7044 
7045   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
7046     return nullptr;
7047 
7048   return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands()));
7049 }
7050 
7051 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
7052   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
7053          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
7054   // Instruction should be widened, unless it is scalar after vectorization,
7055   // scalarization is profitable or it is predicated.
7056   auto WillScalarize = [this, I](unsigned VF) -> bool {
7057     return CM.isScalarAfterVectorization(I, VF) ||
7058            CM.isProfitableToScalarize(I, VF) ||
7059            CM.isScalarWithPredication(I, VF);
7060   };
7061   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
7062                                                              Range);
7063 }
7064 
7065 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const {
7066   auto IsVectorizableOpcode = [](unsigned Opcode) {
7067     switch (Opcode) {
7068     case Instruction::Add:
7069     case Instruction::And:
7070     case Instruction::AShr:
7071     case Instruction::BitCast:
7072     case Instruction::FAdd:
7073     case Instruction::FCmp:
7074     case Instruction::FDiv:
7075     case Instruction::FMul:
7076     case Instruction::FNeg:
7077     case Instruction::FPExt:
7078     case Instruction::FPToSI:
7079     case Instruction::FPToUI:
7080     case Instruction::FPTrunc:
7081     case Instruction::FRem:
7082     case Instruction::FSub:
7083     case Instruction::ICmp:
7084     case Instruction::IntToPtr:
7085     case Instruction::LShr:
7086     case Instruction::Mul:
7087     case Instruction::Or:
7088     case Instruction::PtrToInt:
7089     case Instruction::SDiv:
7090     case Instruction::Select:
7091     case Instruction::SExt:
7092     case Instruction::Shl:
7093     case Instruction::SIToFP:
7094     case Instruction::SRem:
7095     case Instruction::Sub:
7096     case Instruction::Trunc:
7097     case Instruction::UDiv:
7098     case Instruction::UIToFP:
7099     case Instruction::URem:
7100     case Instruction::Xor:
7101     case Instruction::ZExt:
7102       return true;
7103     }
7104     return false;
7105   };
7106 
7107   if (!IsVectorizableOpcode(I->getOpcode()))
7108     return nullptr;
7109 
7110   // Success: widen this instruction.
7111   return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands()));
7112 }
7113 
7114 VPBasicBlock *VPRecipeBuilder::handleReplication(
7115     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
7116     DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
7117     VPlanPtr &Plan) {
7118   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
7119       [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); },
7120       Range);
7121 
7122   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
7123       [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range);
7124 
7125   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
7126                                        IsUniform, IsPredicated);
7127   setRecipe(I, Recipe);
7128 
7129   // Find if I uses a predicated instruction. If so, it will use its scalar
7130   // value. Avoid hoisting the insert-element which packs the scalar value into
7131   // a vector value, as that happens iff all users use the vector value.
7132   for (auto &Op : I->operands())
7133     if (auto *PredInst = dyn_cast<Instruction>(Op))
7134       if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
7135         PredInst2Recipe[PredInst]->setAlsoPack(false);
7136 
7137   // Finalize the recipe for Instr, first if it is not predicated.
7138   if (!IsPredicated) {
7139     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
7140     VPBB->appendRecipe(Recipe);
7141     return VPBB;
7142   }
7143   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
7144   assert(VPBB->getSuccessors().empty() &&
7145          "VPBB has successors when handling predicated replication.");
7146   // Record predicated instructions for above packing optimizations.
7147   PredInst2Recipe[I] = Recipe;
7148   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
7149   VPBlockUtils::insertBlockAfter(Region, VPBB);
7150   auto *RegSucc = new VPBasicBlock();
7151   VPBlockUtils::insertBlockAfter(RegSucc, Region);
7152   return RegSucc;
7153 }
7154 
7155 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
7156                                                       VPRecipeBase *PredRecipe,
7157                                                       VPlanPtr &Plan) {
7158   // Instructions marked for predication are replicated and placed under an
7159   // if-then construct to prevent side-effects.
7160 
7161   // Generate recipes to compute the block mask for this region.
7162   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
7163 
7164   // Build the triangular if-then region.
7165   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
7166   assert(Instr->getParent() && "Predicated instruction not in any basic block");
7167   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
7168   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
7169   auto *PHIRecipe =
7170       Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
7171   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
7172   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
7173   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
7174 
7175   // Note: first set Entry as region entry and then connect successors starting
7176   // from it in order, to propagate the "parent" of each VPBasicBlock.
7177   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
7178   VPBlockUtils::connectBlocks(Pred, Exit);
7179 
7180   return Region;
7181 }
7182 
7183 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
7184                                                       VFRange &Range,
7185                                                       VPlanPtr &Plan) {
7186   // First, check for specific widening recipes that deal with calls, memory
7187   // operations, inductions and Phi nodes.
7188   if (auto *CI = dyn_cast<CallInst>(Instr))
7189     return tryToWidenCall(CI, Range, *Plan);
7190 
7191   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
7192     return tryToWidenMemory(Instr, Range, Plan);
7193 
7194   VPRecipeBase *Recipe;
7195   if (auto Phi = dyn_cast<PHINode>(Instr)) {
7196     if (Phi->getParent() != OrigLoop->getHeader())
7197       return tryToBlend(Phi, Plan);
7198     if ((Recipe = tryToOptimizeInductionPHI(Phi)))
7199       return Recipe;
7200     return new VPWidenPHIRecipe(Phi);
7201   }
7202 
7203   if (isa<TruncInst>(Instr) &&
7204       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Range)))
7205     return Recipe;
7206 
7207   if (!shouldWiden(Instr, Range))
7208     return nullptr;
7209 
7210   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
7211     return new VPWidenGEPRecipe(GEP, Plan->mapToVPValues(GEP->operands()),
7212                                 OrigLoop);
7213 
7214   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
7215     bool InvariantCond =
7216         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
7217     return new VPWidenSelectRecipe(*SI, Plan->mapToVPValues(SI->operands()),
7218                                    InvariantCond);
7219   }
7220 
7221   return tryToWiden(Instr, *Plan);
7222 }
7223 
7224 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF,
7225                                                         unsigned MaxVF) {
7226   assert(OrigLoop->empty() && "Inner loop expected.");
7227 
7228   // Collect conditions feeding internal conditional branches; they need to be
7229   // represented in VPlan for it to model masking.
7230   SmallPtrSet<Value *, 1> NeedDef;
7231 
7232   auto *Latch = OrigLoop->getLoopLatch();
7233   for (BasicBlock *BB : OrigLoop->blocks()) {
7234     if (BB == Latch)
7235       continue;
7236     BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
7237     if (Branch && Branch->isConditional())
7238       NeedDef.insert(Branch->getCondition());
7239   }
7240 
7241   // If the tail is to be folded by masking, the primary induction variable, if
7242   // exists needs to be represented in VPlan for it to model early-exit masking.
7243   // Also, both the Phi and the live-out instruction of each reduction are
7244   // required in order to introduce a select between them in VPlan.
7245   if (CM.foldTailByMasking()) {
7246     if (Legal->getPrimaryInduction())
7247       NeedDef.insert(Legal->getPrimaryInduction());
7248     for (auto &Reduction : Legal->getReductionVars()) {
7249       NeedDef.insert(Reduction.first);
7250       NeedDef.insert(Reduction.second.getLoopExitInstr());
7251     }
7252   }
7253 
7254   // Collect instructions from the original loop that will become trivially dead
7255   // in the vectorized loop. We don't need to vectorize these instructions. For
7256   // example, original induction update instructions can become dead because we
7257   // separately emit induction "steps" when generating code for the new loop.
7258   // Similarly, we create a new latch condition when setting up the structure
7259   // of the new loop, so the old one can become dead.
7260   SmallPtrSet<Instruction *, 4> DeadInstructions;
7261   collectTriviallyDeadInstructions(DeadInstructions);
7262 
7263   // Add assume instructions we need to drop to DeadInstructions, to prevent
7264   // them from being added to the VPlan.
7265   // TODO: We only need to drop assumes in blocks that get flattend. If the
7266   // control flow is preserved, we should keep them.
7267   auto &ConditionalAssumes = Legal->getConditionalAssumes();
7268   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
7269 
7270   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
7271   // Dead instructions do not need sinking. Remove them from SinkAfter.
7272   for (Instruction *I : DeadInstructions)
7273     SinkAfter.erase(I);
7274 
7275   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
7276     VFRange SubRange = {VF, MaxVF + 1};
7277     VPlans.push_back(buildVPlanWithVPRecipes(SubRange, NeedDef,
7278                                              DeadInstructions, SinkAfter));
7279     VF = SubRange.End;
7280   }
7281 }
7282 
7283 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
7284     VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef,
7285     SmallPtrSetImpl<Instruction *> &DeadInstructions,
7286     const DenseMap<Instruction *, Instruction *> &SinkAfter) {
7287 
7288   // Hold a mapping from predicated instructions to their recipes, in order to
7289   // fix their AlsoPack behavior if a user is determined to replicate and use a
7290   // scalar instead of vector value.
7291   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
7292 
7293   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
7294 
7295   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
7296 
7297   // ---------------------------------------------------------------------------
7298   // Pre-construction: record ingredients whose recipes we'll need to further
7299   // process after constructing the initial VPlan.
7300   // ---------------------------------------------------------------------------
7301 
7302   // Mark instructions we'll need to sink later and their targets as
7303   // ingredients whose recipe we'll need to record.
7304   for (auto &Entry : SinkAfter) {
7305     RecipeBuilder.recordRecipeOf(Entry.first);
7306     RecipeBuilder.recordRecipeOf(Entry.second);
7307   }
7308 
7309   // For each interleave group which is relevant for this (possibly trimmed)
7310   // Range, add it to the set of groups to be later applied to the VPlan and add
7311   // placeholders for its members' Recipes which we'll be replacing with a
7312   // single VPInterleaveRecipe.
7313   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
7314     auto applyIG = [IG, this](unsigned VF) -> bool {
7315       return (VF >= 2 && // Query is illegal for VF == 1
7316               CM.getWideningDecision(IG->getInsertPos(), VF) ==
7317                   LoopVectorizationCostModel::CM_Interleave);
7318     };
7319     if (!getDecisionAndClampRange(applyIG, Range))
7320       continue;
7321     InterleaveGroups.insert(IG);
7322     for (unsigned i = 0; i < IG->getFactor(); i++)
7323       if (Instruction *Member = IG->getMember(i))
7324         RecipeBuilder.recordRecipeOf(Member);
7325   };
7326 
7327   // ---------------------------------------------------------------------------
7328   // Build initial VPlan: Scan the body of the loop in a topological order to
7329   // visit each basic block after having visited its predecessor basic blocks.
7330   // ---------------------------------------------------------------------------
7331 
7332   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
7333   auto Plan = std::make_unique<VPlan>();
7334   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
7335   Plan->setEntry(VPBB);
7336 
7337   // Represent values that will have defs inside VPlan.
7338   for (Value *V : NeedDef)
7339     Plan->addVPValue(V);
7340 
7341   // Scan the body of the loop in a topological order to visit each basic block
7342   // after having visited its predecessor basic blocks.
7343   LoopBlocksDFS DFS(OrigLoop);
7344   DFS.perform(LI);
7345 
7346   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
7347     // Relevant instructions from basic block BB will be grouped into VPRecipe
7348     // ingredients and fill a new VPBasicBlock.
7349     unsigned VPBBsForBB = 0;
7350     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
7351     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
7352     VPBB = FirstVPBBForBB;
7353     Builder.setInsertPoint(VPBB);
7354 
7355     // Introduce each ingredient into VPlan.
7356     // TODO: Model and preserve debug instrinsics in VPlan.
7357     for (Instruction &I : BB->instructionsWithoutDebug()) {
7358       Instruction *Instr = &I;
7359 
7360       // First filter out irrelevant instructions, to ensure no recipes are
7361       // built for them.
7362       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
7363         continue;
7364 
7365       if (auto Recipe =
7366               RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
7367         RecipeBuilder.setRecipe(Instr, Recipe);
7368         VPBB->appendRecipe(Recipe);
7369         continue;
7370       }
7371 
7372       // Otherwise, if all widening options failed, Instruction is to be
7373       // replicated. This may create a successor for VPBB.
7374       VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
7375           Instr, Range, VPBB, PredInst2Recipe, Plan);
7376       if (NextVPBB != VPBB) {
7377         VPBB = NextVPBB;
7378         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
7379                                     : "");
7380       }
7381     }
7382   }
7383 
7384   // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
7385   // may also be empty, such as the last one VPBB, reflecting original
7386   // basic-blocks with no recipes.
7387   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
7388   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
7389   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
7390   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
7391   delete PreEntry;
7392 
7393   // ---------------------------------------------------------------------------
7394   // Transform initial VPlan: Apply previously taken decisions, in order, to
7395   // bring the VPlan to its final state.
7396   // ---------------------------------------------------------------------------
7397 
7398   // Apply Sink-After legal constraints.
7399   for (auto &Entry : SinkAfter) {
7400     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
7401     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
7402     Sink->moveAfter(Target);
7403   }
7404 
7405   // Interleave memory: for each Interleave Group we marked earlier as relevant
7406   // for this VPlan, replace the Recipes widening its memory instructions with a
7407   // single VPInterleaveRecipe at its insertion point.
7408   for (auto IG : InterleaveGroups) {
7409     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
7410         RecipeBuilder.getRecipe(IG->getInsertPos()));
7411     (new VPInterleaveRecipe(IG, Recipe->getAddr(), Recipe->getMask()))
7412         ->insertBefore(Recipe);
7413 
7414     for (unsigned i = 0; i < IG->getFactor(); ++i)
7415       if (Instruction *Member = IG->getMember(i)) {
7416         RecipeBuilder.getRecipe(Member)->eraseFromParent();
7417       }
7418   }
7419 
7420   // Finally, if tail is folded by masking, introduce selects between the phi
7421   // and the live-out instruction of each reduction, at the end of the latch.
7422   if (CM.foldTailByMasking()) {
7423     Builder.setInsertPoint(VPBB);
7424     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
7425     for (auto &Reduction : Legal->getReductionVars()) {
7426       VPValue *Phi = Plan->getVPValue(Reduction.first);
7427       VPValue *Red = Plan->getVPValue(Reduction.second.getLoopExitInstr());
7428       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
7429     }
7430   }
7431 
7432   std::string PlanName;
7433   raw_string_ostream RSO(PlanName);
7434   unsigned VF = Range.Start;
7435   Plan->addVF(VF);
7436   RSO << "Initial VPlan for VF={" << VF;
7437   for (VF *= 2; VF < Range.End; VF *= 2) {
7438     Plan->addVF(VF);
7439     RSO << "," << VF;
7440   }
7441   RSO << "},UF>=1";
7442   RSO.flush();
7443   Plan->setName(PlanName);
7444 
7445   return Plan;
7446 }
7447 
7448 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
7449   // Outer loop handling: They may require CFG and instruction level
7450   // transformations before even evaluating whether vectorization is profitable.
7451   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7452   // the vectorization pipeline.
7453   assert(!OrigLoop->empty());
7454   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7455 
7456   // Create new empty VPlan
7457   auto Plan = std::make_unique<VPlan>();
7458 
7459   // Build hierarchical CFG
7460   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
7461   HCFGBuilder.buildHierarchicalCFG();
7462 
7463   for (unsigned VF = Range.Start; VF < Range.End; VF *= 2)
7464     Plan->addVF(VF);
7465 
7466   if (EnableVPlanPredication) {
7467     VPlanPredicator VPP(*Plan);
7468     VPP.predicate();
7469 
7470     // Avoid running transformation to recipes until masked code generation in
7471     // VPlan-native path is in place.
7472     return Plan;
7473   }
7474 
7475   SmallPtrSet<Instruction *, 1> DeadInstructions;
7476   VPlanTransforms::VPInstructionsToVPRecipes(
7477       OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
7478   return Plan;
7479 }
7480 
7481 Value* LoopVectorizationPlanner::VPCallbackILV::
7482 getOrCreateVectorValues(Value *V, unsigned Part) {
7483       return ILV.getOrCreateVectorValue(V, Part);
7484 }
7485 
7486 Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue(
7487     Value *V, const VPIteration &Instance) {
7488   return ILV.getOrCreateScalarValue(V, Instance);
7489 }
7490 
7491 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
7492                                VPSlotTracker &SlotTracker) const {
7493   O << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
7494   IG->getInsertPos()->printAsOperand(O, false);
7495   O << ", ";
7496   getAddr()->printAsOperand(O, SlotTracker);
7497   VPValue *Mask = getMask();
7498   if (Mask) {
7499     O << ", ";
7500     Mask->printAsOperand(O, SlotTracker);
7501   }
7502   for (unsigned i = 0; i < IG->getFactor(); ++i)
7503     if (Instruction *I = IG->getMember(i))
7504       O << "\\l\" +\n" << Indent << "\"  " << VPlanIngredient(I) << " " << i;
7505 }
7506 
7507 void VPWidenCallRecipe::execute(VPTransformState &State) {
7508   State.ILV->widenCallInstruction(Ingredient, User, State);
7509 }
7510 
7511 void VPWidenSelectRecipe::execute(VPTransformState &State) {
7512   State.ILV->widenSelectInstruction(Ingredient, User, InvariantCond, State);
7513 }
7514 
7515 void VPWidenRecipe::execute(VPTransformState &State) {
7516   State.ILV->widenInstruction(Ingredient, User, State);
7517 }
7518 
7519 void VPWidenGEPRecipe::execute(VPTransformState &State) {
7520   State.ILV->widenGEP(GEP, User, State.UF, State.VF, IsPtrLoopInvariant,
7521                       IsIndexLoopInvariant, State);
7522 }
7523 
7524 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
7525   assert(!State.Instance && "Int or FP induction being replicated.");
7526   State.ILV->widenIntOrFpInduction(IV, Trunc);
7527 }
7528 
7529 void VPWidenPHIRecipe::execute(VPTransformState &State) {
7530   State.ILV->widenPHIInstruction(Phi, State.UF, State.VF);
7531 }
7532 
7533 void VPBlendRecipe::execute(VPTransformState &State) {
7534   State.ILV->setDebugLocFromInst(State.Builder, Phi);
7535   // We know that all PHIs in non-header blocks are converted into
7536   // selects, so we don't have to worry about the insertion order and we
7537   // can just use the builder.
7538   // At this point we generate the predication tree. There may be
7539   // duplications since this is a simple recursive scan, but future
7540   // optimizations will clean it up.
7541 
7542   unsigned NumIncoming = getNumIncomingValues();
7543 
7544   // Generate a sequence of selects of the form:
7545   // SELECT(Mask3, In3,
7546   //        SELECT(Mask2, In2,
7547   //               SELECT(Mask1, In1,
7548   //                      In0)))
7549   // Note that Mask0 is never used: lanes for which no path reaches this phi and
7550   // are essentially undef are taken from In0.
7551   InnerLoopVectorizer::VectorParts Entry(State.UF);
7552   for (unsigned In = 0; In < NumIncoming; ++In) {
7553     for (unsigned Part = 0; Part < State.UF; ++Part) {
7554       // We might have single edge PHIs (blocks) - use an identity
7555       // 'select' for the first PHI operand.
7556       Value *In0 = State.get(getIncomingValue(In), Part);
7557       if (In == 0)
7558         Entry[Part] = In0; // Initialize with the first incoming value.
7559       else {
7560         // Select between the current value and the previous incoming edge
7561         // based on the incoming mask.
7562         Value *Cond = State.get(getMask(In), Part);
7563         Entry[Part] =
7564             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
7565       }
7566     }
7567   }
7568   for (unsigned Part = 0; Part < State.UF; ++Part)
7569     State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
7570 }
7571 
7572 void VPInterleaveRecipe::execute(VPTransformState &State) {
7573   assert(!State.Instance && "Interleave group being replicated.");
7574   State.ILV->vectorizeInterleaveGroup(IG, State, getAddr(), getMask());
7575 }
7576 
7577 void VPReplicateRecipe::execute(VPTransformState &State) {
7578   if (State.Instance) { // Generate a single instance.
7579     State.ILV->scalarizeInstruction(Ingredient, User, *State.Instance,
7580                                     IsPredicated, State);
7581     // Insert scalar instance packing it into a vector.
7582     if (AlsoPack && State.VF > 1) {
7583       // If we're constructing lane 0, initialize to start from undef.
7584       if (State.Instance->Lane == 0) {
7585         Value *Undef = UndefValue::get(
7586             FixedVectorType::get(Ingredient->getType(), State.VF));
7587         State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
7588       }
7589       State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
7590     }
7591     return;
7592   }
7593 
7594   // Generate scalar instances for all VF lanes of all UF parts, unless the
7595   // instruction is uniform inwhich case generate only the first lane for each
7596   // of the UF parts.
7597   unsigned EndLane = IsUniform ? 1 : State.VF;
7598   for (unsigned Part = 0; Part < State.UF; ++Part)
7599     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
7600       State.ILV->scalarizeInstruction(Ingredient, User, {Part, Lane},
7601                                       IsPredicated, State);
7602 }
7603 
7604 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
7605   assert(State.Instance && "Branch on Mask works only on single instance.");
7606 
7607   unsigned Part = State.Instance->Part;
7608   unsigned Lane = State.Instance->Lane;
7609 
7610   Value *ConditionBit = nullptr;
7611   VPValue *BlockInMask = getMask();
7612   if (BlockInMask) {
7613     ConditionBit = State.get(BlockInMask, Part);
7614     if (ConditionBit->getType()->isVectorTy())
7615       ConditionBit = State.Builder.CreateExtractElement(
7616           ConditionBit, State.Builder.getInt32(Lane));
7617   } else // Block in mask is all-one.
7618     ConditionBit = State.Builder.getTrue();
7619 
7620   // Replace the temporary unreachable terminator with a new conditional branch,
7621   // whose two destinations will be set later when they are created.
7622   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
7623   assert(isa<UnreachableInst>(CurrentTerminator) &&
7624          "Expected to replace unreachable terminator with conditional branch.");
7625   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
7626   CondBr->setSuccessor(0, nullptr);
7627   ReplaceInstWithInst(CurrentTerminator, CondBr);
7628 }
7629 
7630 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
7631   assert(State.Instance && "Predicated instruction PHI works per instance.");
7632   Instruction *ScalarPredInst = cast<Instruction>(
7633       State.ValueMap.getScalarValue(PredInst, *State.Instance));
7634   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
7635   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
7636   assert(PredicatingBB && "Predicated block has no single predecessor.");
7637 
7638   // By current pack/unpack logic we need to generate only a single phi node: if
7639   // a vector value for the predicated instruction exists at this point it means
7640   // the instruction has vector users only, and a phi for the vector value is
7641   // needed. In this case the recipe of the predicated instruction is marked to
7642   // also do that packing, thereby "hoisting" the insert-element sequence.
7643   // Otherwise, a phi node for the scalar value is needed.
7644   unsigned Part = State.Instance->Part;
7645   if (State.ValueMap.hasVectorValue(PredInst, Part)) {
7646     Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
7647     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
7648     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
7649     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
7650     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
7651     State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
7652   } else {
7653     Type *PredInstType = PredInst->getType();
7654     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
7655     Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
7656     Phi->addIncoming(ScalarPredInst, PredicatedBB);
7657     State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
7658   }
7659 }
7660 
7661 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
7662   VPValue *StoredValue = isa<StoreInst>(Instr) ? getStoredValue() : nullptr;
7663   State.ILV->vectorizeMemoryInstruction(&Instr, State, getAddr(), StoredValue,
7664                                         getMask());
7665 }
7666 
7667 // Determine how to lower the scalar epilogue, which depends on 1) optimising
7668 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
7669 // predication, and 4) a TTI hook that analyses whether the loop is suitable
7670 // for predication.
7671 static ScalarEpilogueLowering getScalarEpilogueLowering(
7672     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
7673     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
7674     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
7675     LoopVectorizationLegality &LVL) {
7676   bool OptSize =
7677       F->hasOptSize() || llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
7678                                                      PGSOQueryType::IRPass);
7679   // 1) OptSize takes precedence over all other options, i.e. if this is set,
7680   // don't look at hints or options, and don't request a scalar epilogue.
7681   if (OptSize)
7682     return CM_ScalarEpilogueNotAllowedOptSize;
7683 
7684   bool PredicateOptDisabled = PreferPredicateOverEpilog.getNumOccurrences() &&
7685                               !PreferPredicateOverEpilog;
7686 
7687   // 2) Next, if disabling predication is requested on the command line, honour
7688   // this and request a scalar epilogue.
7689   if (PredicateOptDisabled)
7690     return CM_ScalarEpilogueAllowed;
7691 
7692   // 3) and 4) look if enabling predication is requested on the command line,
7693   // with a loop hint, or if the TTI hook indicates this is profitable, request
7694   // predication .
7695   if (PreferPredicateOverEpilog ||
7696       Hints.getPredicate() == LoopVectorizeHints::FK_Enabled ||
7697       (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
7698                                         LVL.getLAI()) &&
7699        Hints.getPredicate() != LoopVectorizeHints::FK_Disabled))
7700     return CM_ScalarEpilogueNotNeededUsePredicate;
7701 
7702   return CM_ScalarEpilogueAllowed;
7703 }
7704 
7705 // Process the loop in the VPlan-native vectorization path. This path builds
7706 // VPlan upfront in the vectorization pipeline, which allows to apply
7707 // VPlan-to-VPlan transformations from the very beginning without modifying the
7708 // input LLVM IR.
7709 static bool processLoopInVPlanNativePath(
7710     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
7711     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
7712     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
7713     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
7714     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {
7715 
7716   if (PSE.getBackedgeTakenCount() == PSE.getSE()->getCouldNotCompute()) {
7717     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
7718     return false;
7719   }
7720   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
7721   Function *F = L->getHeader()->getParent();
7722   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
7723 
7724   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
7725       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
7726 
7727   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
7728                                 &Hints, IAI);
7729   // Use the planner for outer loop vectorization.
7730   // TODO: CM is not used at this point inside the planner. Turn CM into an
7731   // optional argument if we don't need it in the future.
7732   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE);
7733 
7734   // Get user vectorization factor.
7735   const unsigned UserVF = Hints.getWidth();
7736 
7737   // Plan how to best vectorize, return the best VF and its cost.
7738   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
7739 
7740   // If we are stress testing VPlan builds, do not attempt to generate vector
7741   // code. Masked vector code generation support will follow soon.
7742   // Also, do not attempt to vectorize if no vector code will be produced.
7743   if (VPlanBuildStressTest || EnableVPlanPredication ||
7744       VectorizationFactor::Disabled() == VF)
7745     return false;
7746 
7747   LVP.setBestPlan(VF.Width, 1);
7748 
7749   InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
7750                          &CM, BFI, PSI);
7751   LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
7752                     << L->getHeader()->getParent()->getName() << "\"\n");
7753   LVP.executePlan(LB, DT);
7754 
7755   // Mark the loop as already vectorized to avoid vectorizing again.
7756   Hints.setAlreadyVectorized();
7757 
7758   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
7759   return true;
7760 }
7761 
7762 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
7763     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
7764                                !EnableLoopInterleaving),
7765       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
7766                               !EnableLoopVectorization) {}
7767 
7768 bool LoopVectorizePass::processLoop(Loop *L) {
7769   assert((EnableVPlanNativePath || L->empty()) &&
7770          "VPlan-native path is not enabled. Only process inner loops.");
7771 
7772 #ifndef NDEBUG
7773   const std::string DebugLocStr = getDebugLocString(L);
7774 #endif /* NDEBUG */
7775 
7776   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
7777                     << L->getHeader()->getParent()->getName() << "\" from "
7778                     << DebugLocStr << "\n");
7779 
7780   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
7781 
7782   LLVM_DEBUG(
7783       dbgs() << "LV: Loop hints:"
7784              << " force="
7785              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
7786                      ? "disabled"
7787                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
7788                             ? "enabled"
7789                             : "?"))
7790              << " width=" << Hints.getWidth()
7791              << " unroll=" << Hints.getInterleave() << "\n");
7792 
7793   // Function containing loop
7794   Function *F = L->getHeader()->getParent();
7795 
7796   // Looking at the diagnostic output is the only way to determine if a loop
7797   // was vectorized (other than looking at the IR or machine code), so it
7798   // is important to generate an optimization remark for each loop. Most of
7799   // these messages are generated as OptimizationRemarkAnalysis. Remarks
7800   // generated as OptimizationRemark and OptimizationRemarkMissed are
7801   // less verbose reporting vectorized loops and unvectorized loops that may
7802   // benefit from vectorization, respectively.
7803 
7804   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
7805     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
7806     return false;
7807   }
7808 
7809   PredicatedScalarEvolution PSE(*SE, *L);
7810 
7811   // Check if it is legal to vectorize the loop.
7812   LoopVectorizationRequirements Requirements(*ORE);
7813   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
7814                                 &Requirements, &Hints, DB, AC, BFI, PSI);
7815   if (!LVL.canVectorize(EnableVPlanNativePath)) {
7816     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
7817     Hints.emitRemarkWithHints();
7818     return false;
7819   }
7820 
7821   // Check the function attributes and profiles to find out if this function
7822   // should be optimized for size.
7823   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
7824       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
7825 
7826   // Entrance to the VPlan-native vectorization path. Outer loops are processed
7827   // here. They may require CFG and instruction level transformations before
7828   // even evaluating whether vectorization is profitable. Since we cannot modify
7829   // the incoming IR, we need to build VPlan upfront in the vectorization
7830   // pipeline.
7831   if (!L->empty())
7832     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
7833                                         ORE, BFI, PSI, Hints);
7834 
7835   assert(L->empty() && "Inner loop expected.");
7836 
7837   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
7838   // count by optimizing for size, to minimize overheads.
7839   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
7840   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
7841     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
7842                       << "This loop is worth vectorizing only if no scalar "
7843                       << "iteration overheads are incurred.");
7844     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
7845       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
7846     else {
7847       LLVM_DEBUG(dbgs() << "\n");
7848       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
7849     }
7850   }
7851 
7852   // Check the function attributes to see if implicit floats are allowed.
7853   // FIXME: This check doesn't seem possibly correct -- what if the loop is
7854   // an integer loop and the vector instructions selected are purely integer
7855   // vector instructions?
7856   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
7857     reportVectorizationFailure(
7858         "Can't vectorize when the NoImplicitFloat attribute is used",
7859         "loop not vectorized due to NoImplicitFloat attribute",
7860         "NoImplicitFloat", ORE, L);
7861     Hints.emitRemarkWithHints();
7862     return false;
7863   }
7864 
7865   // Check if the target supports potentially unsafe FP vectorization.
7866   // FIXME: Add a check for the type of safety issue (denormal, signaling)
7867   // for the target we're vectorizing for, to make sure none of the
7868   // additional fp-math flags can help.
7869   if (Hints.isPotentiallyUnsafe() &&
7870       TTI->isFPVectorizationPotentiallyUnsafe()) {
7871     reportVectorizationFailure(
7872         "Potentially unsafe FP op prevents vectorization",
7873         "loop not vectorized due to unsafe FP support.",
7874         "UnsafeFP", ORE, L);
7875     Hints.emitRemarkWithHints();
7876     return false;
7877   }
7878 
7879   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
7880   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
7881 
7882   // If an override option has been passed in for interleaved accesses, use it.
7883   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
7884     UseInterleaved = EnableInterleavedMemAccesses;
7885 
7886   // Analyze interleaved memory accesses.
7887   if (UseInterleaved) {
7888     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
7889   }
7890 
7891   // Use the cost model.
7892   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
7893                                 F, &Hints, IAI);
7894   CM.collectValuesToIgnore();
7895 
7896   // Use the planner for vectorization.
7897   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);
7898 
7899   // Get user vectorization factor and interleave count.
7900   unsigned UserVF = Hints.getWidth();
7901   unsigned UserIC = Hints.getInterleave();
7902 
7903   // Plan how to best vectorize, return the best VF and its cost.
7904   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
7905 
7906   VectorizationFactor VF = VectorizationFactor::Disabled();
7907   unsigned IC = 1;
7908 
7909   if (MaybeVF) {
7910     VF = *MaybeVF;
7911     // Select the interleave count.
7912     IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
7913   }
7914 
7915   // Identify the diagnostic messages that should be produced.
7916   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
7917   bool VectorizeLoop = true, InterleaveLoop = true;
7918   if (Requirements.doesNotMeet(F, L, Hints)) {
7919     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
7920                          "requirements.\n");
7921     Hints.emitRemarkWithHints();
7922     return false;
7923   }
7924 
7925   if (VF.Width == 1) {
7926     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
7927     VecDiagMsg = std::make_pair(
7928         "VectorizationNotBeneficial",
7929         "the cost-model indicates that vectorization is not beneficial");
7930     VectorizeLoop = false;
7931   }
7932 
7933   if (!MaybeVF && UserIC > 1) {
7934     // Tell the user interleaving was avoided up-front, despite being explicitly
7935     // requested.
7936     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
7937                          "interleaving should be avoided up front\n");
7938     IntDiagMsg = std::make_pair(
7939         "InterleavingAvoided",
7940         "Ignoring UserIC, because interleaving was avoided up front");
7941     InterleaveLoop = false;
7942   } else if (IC == 1 && UserIC <= 1) {
7943     // Tell the user interleaving is not beneficial.
7944     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
7945     IntDiagMsg = std::make_pair(
7946         "InterleavingNotBeneficial",
7947         "the cost-model indicates that interleaving is not beneficial");
7948     InterleaveLoop = false;
7949     if (UserIC == 1) {
7950       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
7951       IntDiagMsg.second +=
7952           " and is explicitly disabled or interleave count is set to 1";
7953     }
7954   } else if (IC > 1 && UserIC == 1) {
7955     // Tell the user interleaving is beneficial, but it explicitly disabled.
7956     LLVM_DEBUG(
7957         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
7958     IntDiagMsg = std::make_pair(
7959         "InterleavingBeneficialButDisabled",
7960         "the cost-model indicates that interleaving is beneficial "
7961         "but is explicitly disabled or interleave count is set to 1");
7962     InterleaveLoop = false;
7963   }
7964 
7965   // Override IC if user provided an interleave count.
7966   IC = UserIC > 0 ? UserIC : IC;
7967 
7968   // Emit diagnostic messages, if any.
7969   const char *VAPassName = Hints.vectorizeAnalysisPassName();
7970   if (!VectorizeLoop && !InterleaveLoop) {
7971     // Do not vectorize or interleaving the loop.
7972     ORE->emit([&]() {
7973       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
7974                                       L->getStartLoc(), L->getHeader())
7975              << VecDiagMsg.second;
7976     });
7977     ORE->emit([&]() {
7978       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
7979                                       L->getStartLoc(), L->getHeader())
7980              << IntDiagMsg.second;
7981     });
7982     return false;
7983   } else if (!VectorizeLoop && InterleaveLoop) {
7984     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
7985     ORE->emit([&]() {
7986       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
7987                                         L->getStartLoc(), L->getHeader())
7988              << VecDiagMsg.second;
7989     });
7990   } else if (VectorizeLoop && !InterleaveLoop) {
7991     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
7992                       << ") in " << DebugLocStr << '\n');
7993     ORE->emit([&]() {
7994       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
7995                                         L->getStartLoc(), L->getHeader())
7996              << IntDiagMsg.second;
7997     });
7998   } else if (VectorizeLoop && InterleaveLoop) {
7999     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
8000                       << ") in " << DebugLocStr << '\n');
8001     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
8002   }
8003 
8004   LVP.setBestPlan(VF.Width, IC);
8005 
8006   using namespace ore;
8007   bool DisableRuntimeUnroll = false;
8008   MDNode *OrigLoopID = L->getLoopID();
8009 
8010   if (!VectorizeLoop) {
8011     assert(IC > 1 && "interleave count should not be 1 or 0");
8012     // If we decided that it is not legal to vectorize the loop, then
8013     // interleave it.
8014     InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, &CM,
8015                                BFI, PSI);
8016     LVP.executePlan(Unroller, DT);
8017 
8018     ORE->emit([&]() {
8019       return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
8020                                 L->getHeader())
8021              << "interleaved loop (interleaved count: "
8022              << NV("InterleaveCount", IC) << ")";
8023     });
8024   } else {
8025     // If we decided that it is *legal* to vectorize the loop, then do it.
8026     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
8027                            &LVL, &CM, BFI, PSI);
8028     LVP.executePlan(LB, DT);
8029     ++LoopsVectorized;
8030 
8031     // Add metadata to disable runtime unrolling a scalar loop when there are
8032     // no runtime checks about strides and memory. A scalar loop that is
8033     // rarely used is not worth unrolling.
8034     if (!LB.areSafetyChecksAdded())
8035       DisableRuntimeUnroll = true;
8036 
8037     // Report the vectorization decision.
8038     ORE->emit([&]() {
8039       return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
8040                                 L->getHeader())
8041              << "vectorized loop (vectorization width: "
8042              << NV("VectorizationFactor", VF.Width)
8043              << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
8044     });
8045   }
8046 
8047   Optional<MDNode *> RemainderLoopID =
8048       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
8049                                       LLVMLoopVectorizeFollowupEpilogue});
8050   if (RemainderLoopID.hasValue()) {
8051     L->setLoopID(RemainderLoopID.getValue());
8052   } else {
8053     if (DisableRuntimeUnroll)
8054       AddRuntimeUnrollDisableMetaData(L);
8055 
8056     // Mark the loop as already vectorized to avoid vectorizing again.
8057     Hints.setAlreadyVectorized();
8058   }
8059 
8060   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
8061   return true;
8062 }
8063 
8064 LoopVectorizeResult LoopVectorizePass::runImpl(
8065     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
8066     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
8067     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
8068     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
8069     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
8070   SE = &SE_;
8071   LI = &LI_;
8072   TTI = &TTI_;
8073   DT = &DT_;
8074   BFI = &BFI_;
8075   TLI = TLI_;
8076   AA = &AA_;
8077   AC = &AC_;
8078   GetLAA = &GetLAA_;
8079   DB = &DB_;
8080   ORE = &ORE_;
8081   PSI = PSI_;
8082 
8083   // Don't attempt if
8084   // 1. the target claims to have no vector registers, and
8085   // 2. interleaving won't help ILP.
8086   //
8087   // The second condition is necessary because, even if the target has no
8088   // vector registers, loop vectorization may still enable scalar
8089   // interleaving.
8090   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
8091       TTI->getMaxInterleaveFactor(1) < 2)
8092     return LoopVectorizeResult(false, false);
8093 
8094   bool Changed = false, CFGChanged = false;
8095 
8096   // The vectorizer requires loops to be in simplified form.
8097   // Since simplification may add new inner loops, it has to run before the
8098   // legality and profitability checks. This means running the loop vectorizer
8099   // will simplify all loops, regardless of whether anything end up being
8100   // vectorized.
8101   for (auto &L : *LI)
8102     Changed |= CFGChanged |=
8103         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
8104 
8105   // Build up a worklist of inner-loops to vectorize. This is necessary as
8106   // the act of vectorizing or partially unrolling a loop creates new loops
8107   // and can invalidate iterators across the loops.
8108   SmallVector<Loop *, 8> Worklist;
8109 
8110   for (Loop *L : *LI)
8111     collectSupportedLoops(*L, LI, ORE, Worklist);
8112 
8113   LoopsAnalyzed += Worklist.size();
8114 
8115   // Now walk the identified inner loops.
8116   while (!Worklist.empty()) {
8117     Loop *L = Worklist.pop_back_val();
8118 
8119     // For the inner loops we actually process, form LCSSA to simplify the
8120     // transform.
8121     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
8122 
8123     Changed |= CFGChanged |= processLoop(L);
8124   }
8125 
8126   // Process each loop nest in the function.
8127   return LoopVectorizeResult(Changed, CFGChanged);
8128 }
8129 
8130 PreservedAnalyses LoopVectorizePass::run(Function &F,
8131                                          FunctionAnalysisManager &AM) {
8132     auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
8133     auto &LI = AM.getResult<LoopAnalysis>(F);
8134     auto &TTI = AM.getResult<TargetIRAnalysis>(F);
8135     auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
8136     auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
8137     auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
8138     auto &AA = AM.getResult<AAManager>(F);
8139     auto &AC = AM.getResult<AssumptionAnalysis>(F);
8140     auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
8141     auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
8142     MemorySSA *MSSA = EnableMSSALoopDependency
8143                           ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
8144                           : nullptr;
8145 
8146     auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
8147     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
8148         [&](Loop &L) -> const LoopAccessInfo & {
8149       LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, MSSA};
8150       return LAM.getResult<LoopAccessAnalysis>(L, AR);
8151     };
8152     auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
8153     ProfileSummaryInfo *PSI =
8154         MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
8155     LoopVectorizeResult Result =
8156         runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
8157     if (!Result.MadeAnyChange)
8158       return PreservedAnalyses::all();
8159     PreservedAnalyses PA;
8160 
8161     // We currently do not preserve loopinfo/dominator analyses with outer loop
8162     // vectorization. Until this is addressed, mark these analyses as preserved
8163     // only for non-VPlan-native path.
8164     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
8165     if (!EnableVPlanNativePath) {
8166       PA.preserve<LoopAnalysis>();
8167       PA.preserve<DominatorTreeAnalysis>();
8168     }
8169     PA.preserve<BasicAA>();
8170     PA.preserve<GlobalsAA>();
8171     if (!Result.MadeCFGChange)
8172       PA.preserveSet<CFGAnalyses>();
8173     return PA;
8174 }
8175