1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
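//
// For example (an illustrative C sketch, not code taken from this pass), with
// a vectorization factor of 4 a scalar loop such as
//
//   for (int i = 0; i < n; ++i)
//     A[i] = B[i] + C[i];
//
// is conceptually rewritten so that each vector iteration covers four
// consecutive elements with wide SIMD operations and the induction variable
// advances by 4, while a scalar epilogue loop handles the remaining n % 4
// iterations:
//
//   int i = 0;
//   for (; i + 4 <= n; i += 4) {
//     // These four element operations become one wide load/add/store.
//     A[i + 0] = B[i + 0] + C[i + 0];
//     A[i + 1] = B[i + 1] + C[i + 1];
//     A[i + 2] = B[i + 2] + C[i + 2];
//     A[i + 3] = B[i + 3] + C[i + 3];
//   }
//   for (; i < n; ++i) // scalar epilogue (remainder) loop
//     A[i] = B[i] + C[i];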
17 //
// This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
// There is a development effort going on to migrate the loop vectorizer to
// the VPlan infrastructure and to introduce outer loop vectorization support
// (see docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SetVector.h"
73 #include "llvm/ADT/SmallPtrSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/MemorySSA.h"
91 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
92 #include "llvm/Analysis/ProfileSummaryInfo.h"
93 #include "llvm/Analysis/ScalarEvolution.h"
94 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
95 #include "llvm/Analysis/TargetLibraryInfo.h"
96 #include "llvm/Analysis/TargetTransformInfo.h"
97 #include "llvm/Analysis/VectorUtils.h"
98 #include "llvm/IR/Attributes.h"
99 #include "llvm/IR/BasicBlock.h"
100 #include "llvm/IR/CFG.h"
101 #include "llvm/IR/Constant.h"
102 #include "llvm/IR/Constants.h"
103 #include "llvm/IR/DataLayout.h"
104 #include "llvm/IR/DebugInfoMetadata.h"
105 #include "llvm/IR/DebugLoc.h"
106 #include "llvm/IR/DerivedTypes.h"
107 #include "llvm/IR/DiagnosticInfo.h"
108 #include "llvm/IR/Dominators.h"
109 #include "llvm/IR/Function.h"
110 #include "llvm/IR/IRBuilder.h"
111 #include "llvm/IR/InstrTypes.h"
112 #include "llvm/IR/Instruction.h"
113 #include "llvm/IR/Instructions.h"
114 #include "llvm/IR/IntrinsicInst.h"
115 #include "llvm/IR/Intrinsics.h"
116 #include "llvm/IR/LLVMContext.h"
117 #include "llvm/IR/Metadata.h"
118 #include "llvm/IR/Module.h"
119 #include "llvm/IR/Operator.h"
120 #include "llvm/IR/Type.h"
121 #include "llvm/IR/Use.h"
122 #include "llvm/IR/User.h"
123 #include "llvm/IR/Value.h"
124 #include "llvm/IR/ValueHandle.h"
125 #include "llvm/IR/Verifier.h"
126 #include "llvm/InitializePasses.h"
127 #include "llvm/Pass.h"
128 #include "llvm/Support/Casting.h"
129 #include "llvm/Support/CommandLine.h"
130 #include "llvm/Support/Compiler.h"
131 #include "llvm/Support/Debug.h"
132 #include "llvm/Support/ErrorHandling.h"
133 #include "llvm/Support/MathExtras.h"
134 #include "llvm/Support/raw_ostream.h"
135 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
136 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
137 #include "llvm/Transforms/Utils/LoopSimplify.h"
138 #include "llvm/Transforms/Utils/LoopUtils.h"
139 #include "llvm/Transforms/Utils/LoopVersioning.h"
140 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
141 #include "llvm/Transforms/Utils/SizeOpts.h"
142 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
143 #include <algorithm>
144 #include <cassert>
145 #include <cstdint>
146 #include <cstdlib>
147 #include <functional>
148 #include <iterator>
149 #include <limits>
150 #include <memory>
151 #include <string>
152 #include <tuple>
153 #include <utility>
154 
155 using namespace llvm;
156 
157 #define LV_NAME "loop-vectorize"
158 #define DEBUG_TYPE LV_NAME
159 
160 /// @{
161 /// Metadata attribute names
162 static const char *const LLVMLoopVectorizeFollowupAll =
163     "llvm.loop.vectorize.followup_all";
164 static const char *const LLVMLoopVectorizeFollowupVectorized =
165     "llvm.loop.vectorize.followup_vectorized";
166 static const char *const LLVMLoopVectorizeFollowupEpilogue =
167     "llvm.loop.vectorize.followup_epilogue";
168 /// @}
169 
170 STATISTIC(LoopsVectorized, "Number of loops vectorized");
171 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
172 
173 /// Loops with a known constant trip count below this number are vectorized only
174 /// if no scalar iteration overheads are incurred.
175 static cl::opt<unsigned> TinyTripCountVectorThreshold(
176     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
177     cl::desc("Loops with a constant trip count that is smaller than this "
178              "value are vectorized only if no scalar iteration overheads "
179              "are incurred."));
180 
// Indicates that an epilogue is undesired; predication is preferred instead.
182 // This means that the vectorizer will try to fold the loop-tail (epilogue)
183 // into the loop and predicate the loop body accordingly.
184 static cl::opt<bool> PreferPredicateOverEpilog(
185     "prefer-predicate-over-epilog", cl::init(false), cl::Hidden,
186     cl::desc("Indicate that an epilogue is undesired, predication should be "
187              "used instead."));
188 
189 static cl::opt<bool> MaximizeBandwidth(
190     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
191     cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in the loop."));
193 
194 static cl::opt<bool> EnableInterleavedMemAccesses(
195     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
196     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
197 
198 /// An interleave-group may need masking if it resides in a block that needs
199 /// predication, or in order to mask away gaps.
200 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
201     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses "
             "in a loop"));
203 
204 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
205     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
207              "below this number"));
208 
209 static cl::opt<unsigned> ForceTargetNumScalarRegs(
210     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
211     cl::desc("A flag that overrides the target's number of scalar registers."));
212 
213 static cl::opt<unsigned> ForceTargetNumVectorRegs(
214     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
215     cl::desc("A flag that overrides the target's number of vector registers."));
216 
217 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
218     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
219     cl::desc("A flag that overrides the target's max interleave factor for "
220              "scalar loops."));
221 
222 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
223     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
224     cl::desc("A flag that overrides the target's max interleave factor for "
225              "vectorized loops."));
226 
227 static cl::opt<unsigned> ForceTargetInstructionCost(
228     "force-target-instruction-cost", cl::init(0), cl::Hidden,
229     cl::desc("A flag that overrides the target's expected cost for "
230              "an instruction to a single constant value. Mostly "
231              "useful for getting consistent testing."));
232 
233 static cl::opt<unsigned> SmallLoopCost(
234     "small-loop-cost", cl::init(20), cl::Hidden,
235     cl::desc(
236         "The cost of a loop that is considered 'small' by the interleaver."));
237 
238 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
239     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
240     cl::desc("Enable the use of the block frequency analysis to access PGO "
241              "heuristics minimizing code growth in cold regions and being more "
242              "aggressive in hot regions."));
243 
244 // Runtime interleave loops for load/store throughput.
245 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
246     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
247     cl::desc(
248         "Enable runtime interleaving until load/store ports are saturated"));
249 
250 /// The number of stores in a loop that are allowed to need predication.
251 static cl::opt<unsigned> NumberOfStoresToPredicate(
252     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
253     cl::desc("Max number of stores to be predicated behind an if."));
254 
255 static cl::opt<bool> EnableIndVarRegisterHeur(
256     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
257     cl::desc("Count the induction variable only once when interleaving"));
258 
259 static cl::opt<bool> EnableCondStoresVectorization(
260     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));
262 
263 static cl::opt<unsigned> MaxNestedScalarReductionIC(
264     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
265     cl::desc("The maximum interleave count to use when interleaving a scalar "
266              "reduction in a nested loop."));
267 
268 cl::opt<bool> EnableVPlanNativePath(
269     "enable-vplan-native-path", cl::init(false), cl::Hidden,
270     cl::desc("Enable VPlan-native vectorization path with "
271              "support for outer loop vectorization."));
272 
273 // FIXME: Remove this switch once we have divergence analysis. Currently we
274 // assume divergent non-backedge branches when this switch is true.
275 cl::opt<bool> EnableVPlanPredication(
276     "enable-vplan-predication", cl::init(false), cl::Hidden,
277     cl::desc("Enable VPlan-native vectorization path predicator with "
278              "support for outer loop vectorization."));
279 
280 // This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
282 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
283 // verification of the H-CFGs built.
284 static cl::opt<bool> VPlanBuildStressTest(
285     "vplan-build-stress-test", cl::init(false), cl::Hidden,
286     cl::desc(
287         "Build VPlan for every supported loop nest in the function and bail "
288         "out right after the build (stress test the VPlan H-CFG construction "
289         "in the VPlan-native vectorization path)."));
290 
291 cl::opt<bool> llvm::EnableLoopInterleaving(
292     "interleave-loops", cl::init(true), cl::Hidden,
293     cl::desc("Enable loop interleaving in Loop vectorization passes"));
294 cl::opt<bool> llvm::EnableLoopVectorization(
295     "vectorize-loops", cl::init(true), cl::Hidden,
296     cl::desc("Run the Loop vectorization passes"));
297 
298 /// A helper function that returns the type of loaded or stored value.
299 static Type *getMemInstValueType(Value *I) {
300   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
301          "Expected Load or Store instruction");
302   if (auto *LI = dyn_cast<LoadInst>(I))
303     return LI->getType();
304   return cast<StoreInst>(I)->getValueOperand()->getType();
305 }
306 
307 /// A helper function that returns true if the given type is irregular. The
308 /// type is irregular if its allocated size doesn't equal the store size of an
309 /// element of the corresponding vector type at the given vectorization factor.
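/// For example (an illustration assuming a typical x86-64 data layout), an
/// x86_fp80 value has a store size of 10 bytes but an alloc size of 16 bytes,
/// so an array of VF such values is not bitcast compatible with a
/// <VF x x86_fp80> vector and the type is considered irregular here.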
310 static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
311   // Determine if an array of VF elements of type Ty is "bitcast compatible"
312   // with a <VF x Ty> vector.
313   if (VF > 1) {
314     auto *VectorTy = FixedVectorType::get(Ty, VF);
315     return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
316   }
317 
318   // If the vectorization factor is one, we just check if an array of type Ty
319   // requires padding between elements.
320   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
321 }
322 
323 /// A helper function that returns the reciprocal of the block probability of
324 /// predicated blocks. If we return X, we are assuming the predicated block
325 /// will execute once for every X iterations of the loop header.
326 ///
327 /// TODO: We should use actual block probability here, if available. Currently,
328 ///       we always assume predicated blocks have a 50% chance of executing.
329 static unsigned getReciprocalPredBlockProb() { return 2; }
330 
331 /// A helper function that adds a 'fast' flag to floating-point operations.
332 static Value *addFastMathFlag(Value *V) {
333   if (isa<FPMathOperator>(V))
334     cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
335   return V;
336 }
337 
338 static Value *addFastMathFlag(Value *V, FastMathFlags FMF) {
339   if (isa<FPMathOperator>(V))
340     cast<Instruction>(V)->setFastMathFlags(FMF);
341   return V;
342 }
343 
344 /// A helper function that returns an integer or floating-point constant with
345 /// value C.
346 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
347   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
348                            : ConstantFP::get(Ty, C);
349 }
350 
351 /// Returns "best known" trip count for the specified loop \p L as defined by
352 /// the following procedure:
353 ///   1) Returns exact trip count if it is known.
354 ///   2) Returns expected trip count according to profile data if any.
355 ///   3) Returns upper bound estimate if it is known.
356 ///   4) Returns None if all of the above failed.
357 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
358   // Check if exact trip count is known.
359   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
360     return ExpectedTC;
361 
362   // Check if there is an expected trip count available from profile data.
363   if (LoopVectorizeWithBlockFrequency)
364     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
365       return EstimatedTC;
366 
367   // Check if upper bound estimate is known.
368   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
369     return ExpectedTC;
370 
371   return None;
372 }
373 
374 namespace llvm {
375 
376 /// InnerLoopVectorizer vectorizes loops which contain only one basic
377 /// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or into
/// multiple scalar copies. This class also implements the following features:
380 /// * It inserts an epilogue loop for handling loops that don't have iteration
381 ///   counts that are known to be a multiple of the vectorization factor.
382 /// * It handles the code generation for reduction variables.
383 /// * Scalarization (implementation using scalars) of un-vectorizable
384 ///   instructions.
385 /// InnerLoopVectorizer does not perform any vectorization-legality
386 /// checks, and relies on the caller to check for the different legality
387 /// aspects. The InnerLoopVectorizer relies on the
388 /// LoopVectorizationLegality class to provide information about the induction
389 /// and reduction variables that were found to a given vectorization factor.
390 class InnerLoopVectorizer {
391 public:
392   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
393                       LoopInfo *LI, DominatorTree *DT,
394                       const TargetLibraryInfo *TLI,
395                       const TargetTransformInfo *TTI, AssumptionCache *AC,
396                       OptimizationRemarkEmitter *ORE, unsigned VecWidth,
397                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
398                       LoopVectorizationCostModel *CM)
399       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
400         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
401         Builder(PSE.getSE()->getContext()),
402         VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}
403   virtual ~InnerLoopVectorizer() = default;
404 
405   /// Create a new empty loop. Unlink the old loop and connect the new one.
406   /// Return the pre-header block of the new loop.
407   BasicBlock *createVectorizedLoopSkeleton();
408 
409   /// Widen a single instruction within the innermost loop.
410   void widenInstruction(Instruction &I, VPUser &Operands,
411                         VPTransformState &State);
412 
413   /// Widen a single call instruction within the innermost loop.
414   void widenCallInstruction(CallInst &I, VPUser &ArgOperands,
415                             VPTransformState &State);
416 
417   /// Widen a single select instruction within the innermost loop.
418   void widenSelectInstruction(SelectInst &I, VPUser &Operands,
419                               bool InvariantCond, VPTransformState &State);
420 
  /// Fix the vectorized code, taking care of header phis, live-outs, and
  /// more.
422   void fixVectorizedLoop();
423 
424   // Return true if any runtime check is added.
425   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
426 
427   /// A type for vectorized values in the new loop. Each value from the
428   /// original loop, when vectorized, is represented by UF vector values in the
429   /// new unrolled loop, where UF is the unroll factor.
430   using VectorParts = SmallVector<Value *, 2>;
431 
432   /// Vectorize a single GetElementPtrInst based on information gathered and
433   /// decisions taken during planning.
434   void widenGEP(GetElementPtrInst *GEP, VPUser &Indices, unsigned UF,
435                 unsigned VF, bool IsPtrLoopInvariant,
436                 SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);
437 
438   /// Vectorize a single PHINode in a block. This method handles the induction
439   /// variable canonicalization. It supports both VF = 1 for unrolled loops and
440   /// arbitrary length vectors.
441   void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);
442 
443   /// A helper function to scalarize a single Instruction in the innermost loop.
444   /// Generates a sequence of scalar instances for each lane between \p MinLane
445   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
446   /// inclusive. Uses the VPValue operands from \p Operands instead of \p
447   /// Instr's operands.
448   void scalarizeInstruction(Instruction *Instr, VPUser &Operands,
449                             const VPIteration &Instance, bool IfPredicateInstr,
450                             VPTransformState &State);
451 
452   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
453   /// is provided, the integer induction variable will first be truncated to
454   /// the corresponding type.
455   void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);
456 
457   /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
458   /// vector or scalar value on-demand if one is not yet available. When
459   /// vectorizing a loop, we visit the definition of an instruction before its
460   /// uses. When visiting the definition, we either vectorize or scalarize the
461   /// instruction, creating an entry for it in the corresponding map. (In some
462   /// cases, such as induction variables, we will create both vector and scalar
463   /// entries.) Then, as we encounter uses of the definition, we derive values
464   /// for each scalar or vector use unless such a value is already available.
465   /// For example, if we scalarize a definition and one of its uses is vector,
466   /// we build the required vector on-demand with an insertelement sequence
467   /// when visiting the use. Otherwise, if the use is scalar, we can use the
468   /// existing scalar definition.
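  /// For example (illustrative IR only, VF = 4), a scalarized i32 definition
  /// whose per-lane values are %x0..%x3 is packed on demand for a vector use
  /// as:
  ///   %v0 = insertelement <4 x i32> undef, i32 %x0, i32 0
  ///   %v1 = insertelement <4 x i32> %v0, i32 %x1, i32 1
  ///   %v2 = insertelement <4 x i32> %v1, i32 %x2, i32 2
  ///   %v3 = insertelement <4 x i32> %v2, i32 %x3, i32 3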
469   ///
470   /// Return a value in the new loop corresponding to \p V from the original
471   /// loop at unroll index \p Part. If the value has already been vectorized,
472   /// the corresponding vector entry in VectorLoopValueMap is returned. If,
473   /// however, the value has a scalar entry in VectorLoopValueMap, we construct
474   /// a new vector value on-demand by inserting the scalar values into a vector
475   /// with an insertelement sequence. If the value has been neither vectorized
476   /// nor scalarized, it must be loop invariant, so we simply broadcast the
477   /// value into a vector.
478   Value *getOrCreateVectorValue(Value *V, unsigned Part);
479 
480   /// Return a value in the new loop corresponding to \p V from the original
481   /// loop at unroll and vector indices \p Instance. If the value has been
482   /// vectorized but not scalarized, the necessary extractelement instruction
483   /// will be generated.
484   Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);
485 
486   /// Construct the vector value of a scalarized value \p V one lane at a time.
487   void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);
488 
489   /// Try to vectorize interleaved access group \p Group with the base address
490   /// given in \p Addr, optionally masking the vector operations if \p
491   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
492   /// values in the vectorized loop.
493   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
494                                 VPTransformState &State, VPValue *Addr,
495                                 VPValue *BlockInMask = nullptr);
496 
497   /// Vectorize Load and Store instructions with the base address given in \p
498   /// Addr, optionally masking the vector operations if \p BlockInMask is
499   /// non-null. Use \p State to translate given VPValues to IR values in the
500   /// vectorized loop.
501   void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
502                                   VPValue *Addr, VPValue *StoredValue,
503                                   VPValue *BlockInMask);
504 
505   /// Set the debug location in the builder using the debug location in
506   /// the instruction.
507   void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
508 
509   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs();
511 
512 protected:
513   friend class LoopVectorizationPlanner;
514 
515   /// A small list of PHINodes.
516   using PhiVector = SmallVector<PHINode *, 4>;
517 
518   /// A type for scalarized values in the new loop. Each value from the
519   /// original loop, when scalarized, is represented by UF x VF scalar values
520   /// in the new unrolled loop, where UF is the unroll factor and VF is the
521   /// vectorization factor.
522   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
523 
524   /// Set up the values of the IVs correctly when exiting the vector loop.
525   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
526                     Value *CountRoundDown, Value *EndValue,
527                     BasicBlock *MiddleBlock);
528 
529   /// Create a new induction variable inside L.
530   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
531                                    Value *Step, Instruction *DL);
532 
533   /// Handle all cross-iteration phis in the header.
534   void fixCrossIterationPHIs();
535 
536   /// Fix a first-order recurrence. This is the second phase of vectorizing
537   /// this phi node.
538   void fixFirstOrderRecurrence(PHINode *Phi);
539 
540   /// Fix a reduction cross-iteration phi. This is the second phase of
541   /// vectorizing this phi node.
542   void fixReduction(PHINode *Phi);
543 
544   /// Clear NSW/NUW flags from reduction instructions if necessary.
545   void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);
546 
547   /// The Loop exit block may have single value PHI nodes with some
548   /// incoming value. While vectorizing we only handled real values
  /// that were defined inside the loop, and we should have one value for
550   /// each predecessor of its parent basic block. See PR14725.
551   void fixLCSSAPHIs();
552 
553   /// Iteratively sink the scalarized operands of a predicated instruction into
554   /// the block that was created for it.
555   void sinkScalarOperands(Instruction *PredInst);
556 
557   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
558   /// represented as.
559   void truncateToMinimalBitwidths();
560 
561   /// Create a broadcast instruction. This method generates a broadcast
562   /// instruction (shuffle) for loop invariant values and for the induction
563   /// value. If this is the induction variable then we extend it to N, N+1, ...
564   /// this is needed because each iteration in the loop corresponds to a SIMD
565   /// element.
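  /// For example (illustrative), broadcasting a loop-invariant i32 %v with
  /// VF = 4 produces the splat <%v, %v, %v, %v>; for the induction variable
  /// the splat is then turned into <N, N+1, N+2, N+3> via getStepVector().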
566   virtual Value *getBroadcastInstrs(Value *V);
567 
568   /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
569   /// to each vector element of Val. The sequence starts at StartIndex.
570   /// \p Opcode is relevant for FP induction variable.
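  /// For example (illustrative), with a <4 x i32> Val, StartIdx = 0 and
  /// Step = 1 this returns Val + <0, 1, 2, 3>, giving each vector lane its
  /// own induction value.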
571   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
572                                Instruction::BinaryOps Opcode =
573                                Instruction::BinaryOpsEnd);
574 
575   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
576   /// variable on which to base the steps, \p Step is the size of the step, and
577   /// \p EntryVal is the value from the original loop that maps to the steps.
578   /// Note that \p EntryVal doesn't have to be an induction variable - it
579   /// can also be a truncate instruction.
580   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
581                         const InductionDescriptor &ID);
582 
583   /// Create a vector induction phi node based on an existing scalar one. \p
584   /// EntryVal is the value from the original loop that maps to the vector phi
585   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
586   /// truncate instruction, instead of widening the original IV, we widen a
587   /// version of the IV truncated to \p EntryVal's type.
588   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
589                                        Value *Step, Instruction *EntryVal);
590 
591   /// Returns true if an instruction \p I should be scalarized instead of
592   /// vectorized for the chosen vectorization factor.
593   bool shouldScalarizeInstruction(Instruction *I) const;
594 
595   /// Returns true if we should generate a scalar version of \p IV.
596   bool needsScalarInduction(Instruction *IV) const;
597 
598   /// If there is a cast involved in the induction variable \p ID, which should
599   /// be ignored in the vectorized loop body, this function records the
600   /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
601   /// cast. We had already proved that the casted Phi is equal to the uncasted
602   /// Phi in the vectorized loop (under a runtime guard), and therefore
603   /// there is no need to vectorize the cast - the same value can be used in the
604   /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
607   ///
608   /// \p EntryVal is the value from the original loop that maps to the vector
609   /// phi node and is used to distinguish what is the IV currently being
610   /// processed - original one (if \p EntryVal is a phi corresponding to the
611   /// original IV) or the "newly-created" one based on the proof mentioned above
612   /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the
613   /// latter case \p EntryVal is a TruncInst and we must not record anything for
614   /// that IV, but it's error-prone to expect callers of this routine to care
615   /// about that, hence this explicit parameter.
616   void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
617                                              const Instruction *EntryVal,
618                                              Value *VectorLoopValue,
619                                              unsigned Part,
620                                              unsigned Lane = UINT_MAX);
621 
622   /// Generate a shuffle sequence that will reverse the vector Vec.
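  /// For example (illustrative), a <4 x i32> value is reversed with a single
  /// shufflevector using the mask <3, 2, 1, 0>.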
623   virtual Value *reverseVector(Value *Vec);
624 
625   /// Returns (and creates if needed) the original loop trip count.
626   Value *getOrCreateTripCount(Loop *NewLoop);
627 
628   /// Returns (and creates if needed) the trip count of the widened loop.
629   Value *getOrCreateVectorTripCount(Loop *NewLoop);
630 
631   /// Returns a bitcasted value to the requested vector type.
632   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
633   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
634                                 const DataLayout &DL);
635 
636   /// Emit a bypass check to see if the vector trip count is zero, including if
637   /// it overflows.
638   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
639 
640   /// Emit a bypass check to see if all of the SCEV assumptions we've
641   /// had to make are correct.
642   void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
643 
644   /// Emit bypass checks to check any memory assumptions we may have made.
645   void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
646 
647   /// Compute the transformed value of Index at offset StartValue using step
648   /// StepValue.
649   /// For integer induction, returns StartValue + Index * StepValue.
650   /// For pointer induction, returns StartValue[Index * StepValue].
651   /// FIXME: The newly created binary instructions should contain nsw/nuw
652   /// flags, which can be found from the original scalar operations.
653   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
654                               const DataLayout &DL,
655                               const InductionDescriptor &ID) const;
656 
657   /// Add additional metadata to \p To that was not present on \p Orig.
658   ///
659   /// Currently this is used to add the noalias annotations based on the
660   /// inserted memchecks.  Use this for instructions that are *cloned* into the
661   /// vector loop.
662   void addNewMetadata(Instruction *To, const Instruction *Orig);
663 
664   /// Add metadata from one instruction to another.
665   ///
666   /// This includes both the original MDs from \p From and additional ones (\see
667   /// addNewMetadata).  Use this for *newly created* instructions in the vector
668   /// loop.
669   void addMetadata(Instruction *To, Instruction *From);
670 
671   /// Similar to the previous function but it adds the metadata to a
672   /// vector of instructions.
673   void addMetadata(ArrayRef<Value *> To, Instruction *From);
674 
675   /// The original loop.
676   Loop *OrigLoop;
677 
678   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
679   /// dynamic knowledge to simplify SCEV expressions and converts them to a
680   /// more usable form.
681   PredicatedScalarEvolution &PSE;
682 
683   /// Loop Info.
684   LoopInfo *LI;
685 
686   /// Dominator Tree.
687   DominatorTree *DT;
688 
689   /// Alias Analysis.
690   AAResults *AA;
691 
692   /// Target Library Info.
693   const TargetLibraryInfo *TLI;
694 
695   /// Target Transform Info.
696   const TargetTransformInfo *TTI;
697 
698   /// Assumption Cache.
699   AssumptionCache *AC;
700 
701   /// Interface to emit optimization remarks.
702   OptimizationRemarkEmitter *ORE;
703 
704   /// LoopVersioning.  It's only set up (non-null) if memchecks were
705   /// used.
706   ///
707   /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
709   std::unique_ptr<LoopVersioning> LVer;
710 
711   /// The vectorization SIMD factor to use. Each vector will have this many
712   /// vector elements.
713   unsigned VF;
714 
715   /// The vectorization unroll factor to use. Each scalar is vectorized to this
716   /// many different vector instructions.
717   unsigned UF;
718 
719   /// The builder that we use
720   IRBuilder<> Builder;
721 
722   // --- Vectorization state ---
723 
724   /// The vector-loop preheader.
725   BasicBlock *LoopVectorPreHeader;
726 
727   /// The scalar-loop preheader.
728   BasicBlock *LoopScalarPreHeader;
729 
730   /// Middle Block between the vector and the scalar.
731   BasicBlock *LoopMiddleBlock;
732 
733   /// The ExitBlock of the scalar loop.
734   BasicBlock *LoopExitBlock;
735 
736   /// The vector loop body.
737   BasicBlock *LoopVectorBody;
738 
739   /// The scalar loop body.
740   BasicBlock *LoopScalarBody;
741 
742   /// A list of all bypass blocks. The first block is the entry of the loop.
743   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
744 
745   /// The new Induction variable which was added to the new block.
746   PHINode *Induction = nullptr;
747 
748   /// The induction variable of the old basic block.
749   PHINode *OldInduction = nullptr;
750 
751   /// Maps values from the original loop to their corresponding values in the
752   /// vectorized loop. A key value can map to either vector values, scalar
753   /// values or both kinds of values, depending on whether the key was
754   /// vectorized and scalarized.
755   VectorizerValueMap VectorLoopValueMap;
756 
757   /// Store instructions that were predicated.
758   SmallVector<Instruction *, 4> PredicatedInstructions;
759 
760   /// Trip count of the original loop.
761   Value *TripCount = nullptr;
762 
763   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
764   Value *VectorTripCount = nullptr;
765 
766   /// The legality analysis.
767   LoopVectorizationLegality *Legal;
768 
  /// The profitability analysis.
770   LoopVectorizationCostModel *Cost;
771 
772   // Record whether runtime checks are added.
773   bool AddedSafetyChecks = false;
774 
775   // Holds the end values for each induction variable. We save the end values
776   // so we can later fix-up the external users of the induction variables.
777   DenseMap<PHINode *, Value *> IVEndValues;
778 
779   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
780   // fixed up at the end of vector code generation.
781   SmallVector<PHINode *, 8> OrigPHIsToFix;
782 };
783 
784 class InnerLoopUnroller : public InnerLoopVectorizer {
785 public:
786   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
787                     LoopInfo *LI, DominatorTree *DT,
788                     const TargetLibraryInfo *TLI,
789                     const TargetTransformInfo *TTI, AssumptionCache *AC,
790                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
791                     LoopVectorizationLegality *LVL,
792                     LoopVectorizationCostModel *CM)
793       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
794                             UnrollFactor, LVL, CM) {}
795 
796 private:
797   Value *getBroadcastInstrs(Value *V) override;
798   Value *getStepVector(Value *Val, int StartIdx, Value *Step,
799                        Instruction::BinaryOps Opcode =
800                        Instruction::BinaryOpsEnd) override;
801   Value *reverseVector(Value *Vec) override;
802 };
803 
804 } // end namespace llvm
805 
/// Look for a meaningful debug location on the instruction or its
807 /// operands.
808 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
809   if (!I)
810     return I;
811 
812   DebugLoc Empty;
813   if (I->getDebugLoc() != Empty)
814     return I;
815 
816   for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
817     if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
818       if (OpInst->getDebugLoc() != Empty)
819         return OpInst;
820   }
821 
822   return I;
823 }
824 
void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B,
                                              const Value *Ptr) {
826   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
827     const DILocation *DIL = Inst->getDebugLoc();
828     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
829         !isa<DbgInfoIntrinsic>(Inst)) {
830       auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(UF * VF);
831       if (NewDIL)
832         B.SetCurrentDebugLocation(NewDIL.getValue());
833       else
834         LLVM_DEBUG(dbgs()
835                    << "Failed to create new discriminator: "
836                    << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
839       B.SetCurrentDebugLocation(DIL);
840   } else
841     B.SetCurrentDebugLocation(DebugLoc());
842 }
843 
844 /// Write a record \p DebugMsg about vectorization failure to the debug
845 /// output stream. If \p I is passed, it is an instruction that prevents
846 /// vectorization.
847 #ifndef NDEBUG
848 static void debugVectorizationFailure(const StringRef DebugMsg,
849     Instruction *I) {
850   dbgs() << "LV: Not vectorizing: " << DebugMsg;
851   if (I != nullptr)
852     dbgs() << " " << *I;
853   else
854     dbgs() << '.';
855   dbgs() << '\n';
856 }
857 #endif
858 
859 /// Create an analysis remark that explains why vectorization failed
860 ///
861 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
862 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
863 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
864 /// the location of the remark.  \return the remark object that can be
865 /// streamed to.
866 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
867     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
868   Value *CodeRegion = TheLoop->getHeader();
869   DebugLoc DL = TheLoop->getStartLoc();
870 
871   if (I) {
872     CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
874     // using the loop's.
875     if (I->getDebugLoc())
876       DL = I->getDebugLoc();
877   }
878 
879   OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
880   R << "loop not vectorized: ";
881   return R;
882 }
883 
884 namespace llvm {
885 
886 void reportVectorizationFailure(const StringRef DebugMsg,
887     const StringRef OREMsg, const StringRef ORETag,
888     OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) {
889   LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
890   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
891   ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
892                 ORETag, TheLoop, I) << OREMsg);
893 }
894 
895 } // end namespace llvm
896 
897 #ifndef NDEBUG
898 /// \return string containing a file name and a line # for the given loop.
899 static std::string getDebugLocString(const Loop *L) {
900   std::string Result;
901   if (L) {
902     raw_string_ostream OS(Result);
903     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
904       LoopDbgLoc.print(OS);
905     else
906       // Just print the module name.
907       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
908     OS.flush();
909   }
910   return Result;
911 }
912 #endif
913 
914 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
915                                          const Instruction *Orig) {
916   // If the loop was versioned with memchecks, add the corresponding no-alias
917   // metadata.
918   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
919     LVer->annotateInstWithNoAlias(To, Orig);
920 }
921 
922 void InnerLoopVectorizer::addMetadata(Instruction *To,
923                                       Instruction *From) {
924   propagateMetadata(To, From);
925   addNewMetadata(To, From);
926 }
927 
928 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
929                                       Instruction *From) {
930   for (Value *V : To) {
931     if (Instruction *I = dyn_cast<Instruction>(V))
932       addMetadata(I, From);
933   }
934 }
935 
936 namespace llvm {
937 
// Hints for the loop vectorization cost model on how the scalar epilogue loop
// should be lowered.
940 enum ScalarEpilogueLowering {
941 
942   // The default: allowing scalar epilogues.
943   CM_ScalarEpilogueAllowed,
944 
945   // Vectorization with OptForSize: don't allow epilogues.
946   CM_ScalarEpilogueNotAllowedOptSize,
947 
  // A special case of vectorization with OptForSize: loops with a very small
949   // trip count are considered for vectorization under OptForSize, thereby
950   // making sure the cost of their loop body is dominant, free of runtime
951   // guards and scalar iteration overheads.
952   CM_ScalarEpilogueNotAllowedLowTripLoop,
953 
954   // Loop hint predicate indicating an epilogue is undesired.
955   CM_ScalarEpilogueNotNeededUsePredicate
956 };
957 
958 /// LoopVectorizationCostModel - estimates the expected speedups due to
959 /// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the
962 /// expected speedup/slowdowns due to the supported instruction set. We use the
963 /// TargetTransformInfo to query the different backends for the cost of
964 /// different operations.
965 class LoopVectorizationCostModel {
966 public:
967   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
968                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
969                              LoopVectorizationLegality *Legal,
970                              const TargetTransformInfo &TTI,
971                              const TargetLibraryInfo *TLI, DemandedBits *DB,
972                              AssumptionCache *AC,
973                              OptimizationRemarkEmitter *ORE, const Function *F,
974                              const LoopVectorizeHints *Hints,
975                              InterleavedAccessInfo &IAI)
976       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
977         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
978         Hints(Hints), InterleaveInfo(IAI) {}
979 
980   /// \return An upper bound for the vectorization factor, or None if
981   /// vectorization and interleaving should be avoided up front.
982   Optional<unsigned> computeMaxVF(unsigned UserVF, unsigned UserIC);
983 
984   /// \return True if runtime checks are required for vectorization, and false
985   /// otherwise.
986   bool runtimeChecksRequired();
987 
988   /// \return The most profitable vectorization factor and the cost of that VF.
989   /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
990   /// then this vectorization factor will be selected if vectorization is
991   /// possible.
992   VectorizationFactor selectVectorizationFactor(unsigned MaxVF);
993 
994   /// Setup cost-based decisions for user vectorization factor.
995   void selectUserVectorizationFactor(unsigned UserVF) {
996     collectUniformsAndScalars(UserVF);
997     collectInstsToScalarize(UserVF);
998   }
999 
1000   /// \return The size (in bits) of the smallest and widest types in the code
1001   /// that needs to be vectorized. We ignore values that remain scalar such as
1002   /// 64 bit loop indices.
1003   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1004 
1005   /// \return The desired interleave count.
1006   /// If interleave count has been specified by metadata it will be returned.
1007   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1008   /// are the selected vectorization factor and the cost of the selected VF.
1009   unsigned selectInterleaveCount(unsigned VF, unsigned LoopCost);
1010 
  /// A memory access instruction may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
1018   void setCostBasedWideningDecision(unsigned VF);
1019 
1020   /// A struct that represents some properties of the register usage
1021   /// of a loop.
1022   struct RegisterUsage {
1023     /// Holds the number of loop invariant values that are used in the loop.
1024     /// The key is ClassID of target-provided register class.
1025     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1026     /// Holds the maximum number of concurrent live intervals in the loop.
1027     /// The key is ClassID of target-provided register class.
1028     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1029   };
1030 
1031   /// \return Returns information about the register usages of the loop for the
1032   /// given vectorization factors.
1033   SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1034 
1035   /// Collect values we want to ignore in the cost model.
1036   void collectValuesToIgnore();
1037 
1038   /// \returns The smallest bitwidth each instruction can be represented with.
1039   /// The vector equivalents of these instructions should be truncated to this
1040   /// type.
1041   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1042     return MinBWs;
1043   }
1044 
1045   /// \returns True if it is more profitable to scalarize instruction \p I for
1046   /// vectorization factor \p VF.
1047   bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
1048     assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");
1049 
1050     // Cost model is not run in the VPlan-native path - return conservative
1051     // result until this changes.
1052     if (EnableVPlanNativePath)
1053       return false;
1054 
1055     auto Scalars = InstsToScalarize.find(VF);
1056     assert(Scalars != InstsToScalarize.end() &&
1057            "VF not yet analyzed for scalarization profitability");
1058     return Scalars->second.find(I) != Scalars->second.end();
1059   }
1060 
1061   /// Returns true if \p I is known to be uniform after vectorization.
1062   bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
1063     if (VF == 1)
1064       return true;
1065 
1066     // Cost model is not run in the VPlan-native path - return conservative
1067     // result until this changes.
1068     if (EnableVPlanNativePath)
1069       return false;
1070 
1071     auto UniformsPerVF = Uniforms.find(VF);
1072     assert(UniformsPerVF != Uniforms.end() &&
1073            "VF not yet analyzed for uniformity");
1074     return UniformsPerVF->second.count(I);
1075   }
1076 
1077   /// Returns true if \p I is known to be scalar after vectorization.
1078   bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
1079     if (VF == 1)
1080       return true;
1081 
1082     // Cost model is not run in the VPlan-native path - return conservative
1083     // result until this changes.
1084     if (EnableVPlanNativePath)
1085       return false;
1086 
1087     auto ScalarsPerVF = Scalars.find(VF);
1088     assert(ScalarsPerVF != Scalars.end() &&
1089            "Scalar values are not calculated for VF");
1090     return ScalarsPerVF->second.count(I);
1091   }
1092 
1093   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1094   /// for vectorization factor \p VF.
1095   bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
1096     return VF > 1 && MinBWs.find(I) != MinBWs.end() &&
1097            !isProfitableToScalarize(I, VF) &&
1098            !isScalarAfterVectorization(I, VF);
1099   }
1100 
1101   /// Decision that was taken during cost calculation for memory instruction.
1102   enum InstWidening {
1103     CM_Unknown,
1104     CM_Widen,         // For consecutive accesses with stride +1.
1105     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1106     CM_Interleave,
1107     CM_GatherScatter,
1108     CM_Scalarize
1109   };
1110 
1111   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1112   /// instruction \p I and vector width \p VF.
1113   void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
1114                            unsigned Cost) {
1115     assert(VF >= 2 && "Expected VF >=2");
1116     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1117   }
1118 
1119   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1120   /// interleaving group \p Grp and vector width \p VF.
1121   void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF,
1122                            InstWidening W, unsigned Cost) {
1123     assert(VF >= 2 && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
1125     /// But the cost will be assigned to one instruction only.
1126     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1127       if (auto *I = Grp->getMember(i)) {
1128         if (Grp->getInsertPos() == I)
1129           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1130         else
1131           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1132       }
1133     }
1134   }
1135 
1136   /// Return the cost model decision for the given instruction \p I and vector
1137   /// width \p VF. Return CM_Unknown if this instruction did not pass
1138   /// through the cost modeling.
1139   InstWidening getWideningDecision(Instruction *I, unsigned VF) {
1140     assert(VF >= 2 && "Expected VF >=2");
1141 
1142     // Cost model is not run in the VPlan-native path - return conservative
1143     // result until this changes.
1144     if (EnableVPlanNativePath)
1145       return CM_GatherScatter;
1146 
1147     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1148     auto Itr = WideningDecisions.find(InstOnVF);
1149     if (Itr == WideningDecisions.end())
1150       return CM_Unknown;
1151     return Itr->second.first;
1152   }
1153 
1154   /// Return the vectorization cost for the given instruction \p I and vector
1155   /// width \p VF.
1156   unsigned getWideningCost(Instruction *I, unsigned VF) {
1157     assert(VF >= 2 && "Expected VF >=2");
1158     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1159     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1160            "The cost is not calculated");
1161     return WideningDecisions[InstOnVF].second;
1162   }
1163 
1164   /// Return True if instruction \p I is an optimizable truncate whose operand
1165   /// is an induction variable. Such a truncate will be removed by adding a new
1166   /// induction variable with the destination type.
1167   bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
1168     // If the instruction is not a truncate, return false.
1169     auto *Trunc = dyn_cast<TruncInst>(I);
1170     if (!Trunc)
1171       return false;
1172 
1173     // Get the source and destination types of the truncate.
1174     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1175     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1176 
1177     // If the truncate is free for the given types, return false. Replacing a
1178     // free truncate with an induction variable would add an induction variable
1179     // update instruction to each iteration of the loop. We exclude from this
1180     // check the primary induction variable since it will need an update
1181     // instruction regardless.
1182     Value *Op = Trunc->getOperand(0);
1183     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1184       return false;
1185 
1186     // If the truncated value is not an induction variable, return false.
1187     return Legal->isInductionPhi(Op);
1188   }
1189 
1190   /// Collects the instructions to scalarize for each predicated instruction in
1191   /// the loop.
1192   void collectInstsToScalarize(unsigned VF);
1193 
1194   /// Collect Uniform and Scalar values for the given \p VF.
1195   /// The sets depend on CM decision for Load/Store instructions
1196   /// that may be vectorized as interleave, gather-scatter or scalarized.
1197   void collectUniformsAndScalars(unsigned VF) {
1198     // Do the analysis once.
1199     if (VF == 1 || Uniforms.find(VF) != Uniforms.end())
1200       return;
1201     setCostBasedWideningDecision(VF);
1202     collectLoopUniforms(VF);
1203     collectLoopScalars(VF);
1204   }
1205 
1206   /// Returns true if the target machine supports masked store operation
1207   /// for the given \p DataType and kind of access to \p Ptr.
1208   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) {
1209     return Legal->isConsecutivePtr(Ptr) &&
1210            TTI.isLegalMaskedStore(DataType, Alignment);
1211   }
1212 
1213   /// Returns true if the target machine supports masked load operation
1214   /// for the given \p DataType and kind of access to \p Ptr.
1215   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) {
1216     return Legal->isConsecutivePtr(Ptr) &&
1217            TTI.isLegalMaskedLoad(DataType, Alignment);
1218   }
1219 
1220   /// Returns true if the target machine supports masked scatter operation
1221   /// for the given \p DataType.
1222   bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
1223     return TTI.isLegalMaskedScatter(DataType, Alignment);
1224   }
1225 
1226   /// Returns true if the target machine supports masked gather operation
1227   /// for the given \p DataType.
1228   bool isLegalMaskedGather(Type *DataType, Align Alignment) {
1229     return TTI.isLegalMaskedGather(DataType, Alignment);
1230   }
1231 
1232   /// Returns true if the target machine can represent \p V as a masked gather
1233   /// or scatter operation.
1234   bool isLegalGatherOrScatter(Value *V) {
1235     bool LI = isa<LoadInst>(V);
1236     bool SI = isa<StoreInst>(V);
1237     if (!LI && !SI)
1238       return false;
1239     auto *Ty = getMemInstValueType(V);
1240     Align Align = getLoadStoreAlignment(V);
1241     return (LI && isLegalMaskedGather(Ty, Align)) ||
1242            (SI && isLegalMaskedScatter(Ty, Align));
1243   }
1244 
1245   /// Returns true if \p I is an instruction that will be scalarized with
1246   /// predication. Such instructions include conditional stores and
1247   /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
1250   bool isScalarWithPredication(Instruction *I, unsigned VF = 1);
1251 
  /// Returns true if \p I is an instruction that will be predicated either
  /// through scalar predication or masked load/store or masked gather/scatter.
  /// Superset of instructions that return true for isScalarWithPredication.
1255   bool isPredicatedInst(Instruction *I) {
1256     if (!blockNeedsPredication(I->getParent()))
1257       return false;
1258     // Loads and stores that need some form of masked operation are predicated
1259     // instructions.
1260     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1261       return Legal->isMaskRequired(I);
1262     return isScalarWithPredication(I);
1263   }
1264 
1265   /// Returns true if \p I is a memory instruction with consecutive memory
1266   /// access that can be widened.
1267   bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);
1268 
1269   /// Returns true if \p I is a memory instruction in an interleaved-group
1270   /// of memory accesses that can be vectorized with wide vector loads/stores
1271   /// and shuffles.
1272   bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1);
1273 
1274   /// Check if \p Instr belongs to any interleaved access group.
1275   bool isAccessInterleaved(Instruction *Instr) {
1276     return InterleaveInfo.isInterleaved(Instr);
1277   }
1278 
1279   /// Get the interleaved access group that \p Instr belongs to.
1280   const InterleaveGroup<Instruction> *
1281   getInterleavedAccessGroup(Instruction *Instr) {
1282     return InterleaveInfo.getInterleaveGroup(Instr);
1283   }
1284 
1285   /// Returns true if an interleaved group requires a scalar iteration
1286   /// to handle accesses with gaps, and there is nothing preventing us from
1287   /// creating a scalar epilogue.
1288   bool requiresScalarEpilogue() const {
1289     return isScalarEpilogueAllowed() && InterleaveInfo.requiresScalarEpilogue();
1290   }
1291 
  /// Returns true if a scalar epilogue is allowed, i.e. it has not been
  /// disallowed due to optsize or a loop hint annotation.
1294   bool isScalarEpilogueAllowed() const {
1295     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1296   }
1297 
  /// Returns true if all loop blocks should be masked to fold the tail of the
  /// loop.
1299   bool foldTailByMasking() const { return FoldTailByMasking; }
1300 
1301   bool blockNeedsPredication(BasicBlock *BB) {
1302     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1303   }
1304 
1305   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1306   /// with factor VF.  Return the cost of the instruction, including
1307   /// scalarization overhead if it's needed.
1308   unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF);
1309 
1310   /// Estimate cost of a call instruction CI if it were vectorized with factor
1311   /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
1315   unsigned getVectorCallCost(CallInst *CI, unsigned VF, bool &NeedToScalarize);
1316 
1317   /// Invalidates decisions already taken by the cost model.
1318   void invalidateCostModelingDecisions() {
1319     WideningDecisions.clear();
1320     Uniforms.clear();
1321     Scalars.clear();
1322   }
1323 
1324 private:
1325   unsigned NumPredStores = 0;
1326 
1327   /// \return An upper bound for the vectorization factor, a power-of-2 larger
1328   /// than zero. One is returned if vectorization should best be avoided due
1329   /// to cost.
1330   unsigned computeFeasibleMaxVF(unsigned ConstTripCount);
1331 
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
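  ///
  /// For example (illustrative), a value of {8, false} means the estimated
  /// cost is 8 units but none of the contributing operations is expected to
  /// remain a vector operation after legalization, i.e. the code is
  /// effectively scalarized.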
1339   using VectorizationCostTy = std::pair<unsigned, bool>;
1340 
1341   /// Returns the expected execution cost. The unit of the cost does
1342   /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the vectorization factor.
1345   VectorizationCostTy expectedCost(unsigned VF);
1346 
1347   /// Returns the execution time cost of an instruction for a given vector
1348   /// width. Vector width of one means scalar.
1349   VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
1350 
1351   /// The cost-computation logic from getInstructionCost which provides
1352   /// the vector type as an output parameter.
1353   unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
1354 
1355   /// Calculate vectorization cost of memory instruction \p I.
1356   unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);
1357 
1358   /// The cost computation for scalarized memory instruction.
1359   unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);
1360 
1361   /// The cost computation for interleaving group of memory instructions.
1362   unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);
1363 
1364   /// The cost computation for Gather/Scatter instruction.
1365   unsigned getGatherScatterCost(Instruction *I, unsigned VF);
1366 
1367   /// The cost computation for widening instruction \p I with consecutive
1368   /// memory access.
1369   unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
1370 
1371   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1372   /// Load: scalar load + broadcast.
1373   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1374   /// element)
1375   unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
1376 
1377   /// Estimate the overhead of scalarizing an instruction. This is a
1378   /// convenience wrapper for the type-based getScalarizationOverhead API.
1379   unsigned getScalarizationOverhead(Instruction *I, unsigned VF);
1380 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
1383   bool isConsecutiveLoadOrStore(Instruction *I);
1384 
1385   /// Returns true if an artificially high cost for emulated masked memrefs
1386   /// should be used.
1387   bool useEmulatedMaskMemRefHack(Instruction *I);
1388 
1389   /// Map of scalar integer values to the smallest bitwidth they can be legally
1390   /// represented as. The vector equivalents of these values should be truncated
1391   /// to this type.
1392   MapVector<Instruction *, uint64_t> MinBWs;
1393 
1394   /// A type representing the costs for instructions if they were to be
1395   /// scalarized rather than vectorized. The entries are Instruction-Cost
1396   /// pairs.
1397   using ScalarCostsTy = DenseMap<Instruction *, unsigned>;
1398 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1401   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1402 
1403   /// Records whether it is allowed to have the original scalar loop execute at
1404   /// least once. This may be needed as a fallback loop in case runtime
1405   /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or not a multiple of the VF,
1407   /// or as a peel-loop to handle gaps in interleave-groups.
1408   /// Under optsize and when the trip count is very small we don't allow any
1409   /// iterations to execute in the scalar loop.
1410   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1411 
  /// All blocks of the loop are to be masked to fold the tail of the scalar
  /// iterations.
1413   bool FoldTailByMasking = false;
1414 
1415   /// A map holding scalar costs for different vectorization factors. The
1416   /// presence of a cost for an instruction in the mapping indicates that the
1417   /// instruction will be scalarized when vectorizing with the associated
1418   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1419   DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;
1420 
1421   /// Holds the instructions known to be uniform after vectorization.
1422   /// The data is collected per VF.
1423   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;
1424 
1425   /// Holds the instructions known to be scalar after vectorization.
1426   /// The data is collected per VF.
1427   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;
1428 
1429   /// Holds the instructions (address computations) that are forced to be
1430   /// scalarized.
1431   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1432 
1433   /// Returns the expected difference in cost from scalarizing the expression
1434   /// feeding a predicated instruction \p PredInst. The instructions to
1435   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1436   /// non-negative return value implies the expression will be scalarized.
1437   /// Currently, only single-use chains are considered for scalarization.
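  ///
  /// For example (illustrative), if a predicated sdiv is fed by a single-use
  /// add, scalarizing both the add and the sdiv may be cheaper than widening
  /// the add and then extracting its lanes for the scalarized sdiv; the
  /// returned discount reflects that difference.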
1438   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1439                               unsigned VF);
1440 
1441   /// Collect the instructions that are uniform after vectorization. An
1442   /// instruction is uniform if we represent it with a single scalar value in
1443   /// the vectorized loop corresponding to each vector iteration. Examples of
1444   /// uniform instructions include pointer operands of consecutive or
1445   /// interleaved memory accesses. Note that although uniformity implies an
1446   /// instruction will be scalar, the reverse is not true. In general, a
1447   /// scalarized instruction will be represented by VF scalar values in the
1448   /// vectorized loop, each corresponding to an iteration of the original
1449   /// scalar loop.
1450   void collectLoopUniforms(unsigned VF);
1451 
1452   /// Collect the instructions that are scalar after vectorization. An
1453   /// instruction is scalar if it is known to be uniform or will be scalarized
1454   /// during vectorization. Non-uniform scalarized instructions will be
1455   /// represented by VF values in the vectorized loop, each corresponding to an
1456   /// iteration of the original scalar loop.
1457   void collectLoopScalars(unsigned VF);
1458 
  /// Keeps cost model vectorization decisions and costs for instructions.
  /// Right now it is used for memory instructions only.
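  ///
  /// For example (illustrative), an entry mapping {SomeLoad, 4} to
  /// {CM_Widen, 2} records that, for VF = 4, the load is to be widened at an
  /// estimated cost of 2.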
1461   using DecisionList = DenseMap<std::pair<Instruction *, unsigned>,
1462                                 std::pair<InstWidening, unsigned>>;
1463 
1464   DecisionList WideningDecisions;
1465 
1466   /// Returns true if \p V is expected to be vectorized and it needs to be
1467   /// extracted.
1468   bool needsExtract(Value *V, unsigned VF) const {
1469     Instruction *I = dyn_cast<Instruction>(V);
1470     if (VF == 1 || !I || !TheLoop->contains(I) || TheLoop->isLoopInvariant(I))
1471       return false;
1472 
1473     // Assume we can vectorize V (and hence we need extraction) if the
1474     // scalars are not computed yet. This can happen, because it is called
1475     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1476     // the scalars are collected. That should be a safe assumption in most
1477     // cases, because we check if the operands have vectorizable types
1478     // beforehand in LoopVectorizationLegality.
1479     return Scalars.find(VF) == Scalars.end() ||
1480            !isScalarAfterVectorization(I, VF);
  }
1482 
1483   /// Returns a range containing only operands needing to be extracted.
1484   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1485                                                    unsigned VF) {
1486     return SmallVector<Value *, 4>(make_filter_range(
1487         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1488   }
1489 
1490 public:
1491   /// The loop that we evaluate.
1492   Loop *TheLoop;
1493 
1494   /// Predicated scalar evolution analysis.
1495   PredicatedScalarEvolution &PSE;
1496 
1497   /// Loop Info analysis.
1498   LoopInfo *LI;
1499 
1500   /// Vectorization legality.
1501   LoopVectorizationLegality *Legal;
1502 
1503   /// Vector target information.
1504   const TargetTransformInfo &TTI;
1505 
1506   /// Target Library Info.
1507   const TargetLibraryInfo *TLI;
1508 
1509   /// Demanded bits analysis.
1510   DemandedBits *DB;
1511 
1512   /// Assumption cache.
1513   AssumptionCache *AC;
1514 
1515   /// Interface to emit optimization remarks.
1516   OptimizationRemarkEmitter *ORE;
1517 
1518   const Function *TheFunction;
1519 
1520   /// Loop Vectorize Hint.
1521   const LoopVectorizeHints *Hints;
1522 
  /// The interleaved access information contains groups of interleaved
  /// accesses with the same stride that are close to each other.
1525   InterleavedAccessInfo &InterleaveInfo;
1526 
1527   /// Values to ignore in the cost model.
1528   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1529 
1530   /// Values to ignore in the cost model when VF > 1.
1531   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1532 };
1533 
1534 } // end namespace llvm
1535 
1536 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
1537 // vectorization. The loop needs to be annotated with #pragma omp simd
1538 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
1539 // vector length information is not provided, vectorization is not considered
1540 // explicit. Interleave hints are not allowed either. These limitations will be
1541 // relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
1543 // vectorize' semantics. This pragma provides *auto-vectorization hints*
1544 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1545 // provides *explicit vectorization hints* (LV can bypass legal checks and
1546 // assume that vectorization is legal). However, both hints are implemented
1547 // using the same metadata (llvm.loop.vectorize, processed by
1548 // LoopVectorizeHints). This will be fixed in the future when the native IR
1549 // representation for pragma 'omp simd' is introduced.
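//
// For example (an illustrative, source-level sketch), an outer loop annotated
// as follows is considered for explicit outer-loop vectorization:
//   #pragma omp simd simdlen(4)
//   for (int i = 0; i < N; ++i)
//     for (int j = 0; j < M; ++j)
//       A[i][j] += B[i][j];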
1550 static bool isExplicitVecOuterLoop(Loop *OuterLp,
1551                                    OptimizationRemarkEmitter *ORE) {
1552   assert(!OuterLp->empty() && "This is not an outer loop");
1553   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1554 
1555   // Only outer loops with an explicit vectorization hint are supported.
1556   // Unannotated outer loops are ignored.
1557   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1558     return false;
1559 
1560   Function *Fn = OuterLp->getHeader()->getParent();
1561   if (!Hints.allowVectorization(Fn, OuterLp,
1562                                 true /*VectorizeOnlyWhenForced*/)) {
1563     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1564     return false;
1565   }
1566 
1567   if (Hints.getInterleave() > 1) {
1568     // TODO: Interleave support is future work.
1569     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1570                          "outer loops.\n");
1571     Hints.emitRemarkWithHints();
1572     return false;
1573   }
1574 
1575   return true;
1576 }
1577 
1578 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
1579                                   OptimizationRemarkEmitter *ORE,
1580                                   SmallVectorImpl<Loop *> &V) {
1581   // Collect inner loops and outer loops without irreducible control flow. For
1582   // now, only collect outer loops that have explicit vectorization hints. If we
1583   // are stress testing the VPlan H-CFG construction, we collect the outermost
1584   // loop of every loop nest.
1585   if (L.empty() || VPlanBuildStressTest ||
1586       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
1587     LoopBlocksRPO RPOT(&L);
1588     RPOT.perform(LI);
1589     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
1590       V.push_back(&L);
1591       // TODO: Collect inner loops inside marked outer loops in case
1592       // vectorization fails for the outer loop. Do not invoke
1593       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1594       // already known to be reducible. We can use an inherited attribute for
1595       // that.
1596       return;
1597     }
1598   }
1599   for (Loop *InnerL : L)
1600     collectSupportedLoops(*InnerL, LI, ORE, V);
1601 }
1602 
1603 namespace {
1604 
1605 /// The LoopVectorize Pass.
1606 struct LoopVectorize : public FunctionPass {
1607   /// Pass identification, replacement for typeid
1608   static char ID;
1609 
1610   LoopVectorizePass Impl;
1611 
1612   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
1613                          bool VectorizeOnlyWhenForced = false)
1614       : FunctionPass(ID),
1615         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
1616     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
1617   }
1618 
1619   bool runOnFunction(Function &F) override {
1620     if (skipFunction(F))
1621       return false;
1622 
1623     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1624     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1625     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1626     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1627     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
1628     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
1629     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
1630     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1631     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1632     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
1633     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
1634     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
1635     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
1636 
1637     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
1638         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
1639 
1640     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
1641                         GetLAA, *ORE, PSI).MadeAnyChange;
1642   }
1643 
1644   void getAnalysisUsage(AnalysisUsage &AU) const override {
1645     AU.addRequired<AssumptionCacheTracker>();
1646     AU.addRequired<BlockFrequencyInfoWrapperPass>();
1647     AU.addRequired<DominatorTreeWrapperPass>();
1648     AU.addRequired<LoopInfoWrapperPass>();
1649     AU.addRequired<ScalarEvolutionWrapperPass>();
1650     AU.addRequired<TargetTransformInfoWrapperPass>();
1651     AU.addRequired<AAResultsWrapperPass>();
1652     AU.addRequired<LoopAccessLegacyAnalysis>();
1653     AU.addRequired<DemandedBitsWrapperPass>();
1654     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
1655     AU.addRequired<InjectTLIMappingsLegacy>();
1656 
1657     // We currently do not preserve loopinfo/dominator analyses with outer loop
1658     // vectorization. Until this is addressed, mark these analyses as preserved
1659     // only for non-VPlan-native path.
1660     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
1661     if (!EnableVPlanNativePath) {
1662       AU.addPreserved<LoopInfoWrapperPass>();
1663       AU.addPreserved<DominatorTreeWrapperPass>();
1664     }
1665 
1666     AU.addPreserved<BasicAAWrapperPass>();
1667     AU.addPreserved<GlobalsAAWrapperPass>();
1668     AU.addRequired<ProfileSummaryInfoWrapperPass>();
1669   }
1670 };
1671 
1672 } // end anonymous namespace
1673 
1674 //===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer,
// LoopVectorizationCostModel and LoopVectorizationPlanner.
1677 //===----------------------------------------------------------------------===//
1678 
1679 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will stay
  // inside the vector loop body.
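  //
  // For example (illustrative), broadcasting a loop-invariant i32 %x with
  // VF = 4 produces IR along the lines of:
  //   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %x, i32 0
  //   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
  //                                    <4 x i32> undef, <4 x i32> zeroinitializer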
1683   Instruction *Instr = dyn_cast<Instruction>(V);
1684   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
1685                      (!Instr ||
1686                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
1687   // Place the code for broadcasting invariant variables in the new preheader.
1688   IRBuilder<>::InsertPointGuard Guard(Builder);
1689   if (SafeToHoist)
1690     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1691 
1692   // Broadcast the scalar into all locations in the vector.
1693   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
1694 
1695   return Shuf;
1696 }
1697 
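// For an integer induction starting at 0 with step 1, VF = 4 and UF = 1 (an
// illustrative sketch), this creates a vector PHI whose initial value is
// <0, 1, 2, 3> and which is advanced by the splat <4, 4, 4, 4> on every
// iteration of the vector loop.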
1698 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
1699     const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
1700   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1701          "Expected either an induction phi-node or a truncate of it!");
1702   Value *Start = II.getStartValue();
1703 
1704   // Construct the initial value of the vector IV in the vector loop preheader
1705   auto CurrIP = Builder.saveIP();
1706   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1707   if (isa<TruncInst>(EntryVal)) {
1708     assert(Start->getType()->isIntegerTy() &&
1709            "Truncation requires an integer type");
1710     auto *TruncType = cast<IntegerType>(EntryVal->getType());
1711     Step = Builder.CreateTrunc(Step, TruncType);
1712     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
1713   }
1714   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
1715   Value *SteppedStart =
1716       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
1717 
1718   // We create vector phi nodes for both integer and floating-point induction
1719   // variables. Here, we determine the kind of arithmetic we will perform.
1720   Instruction::BinaryOps AddOp;
1721   Instruction::BinaryOps MulOp;
1722   if (Step->getType()->isIntegerTy()) {
1723     AddOp = Instruction::Add;
1724     MulOp = Instruction::Mul;
1725   } else {
1726     AddOp = II.getInductionOpcode();
1727     MulOp = Instruction::FMul;
1728   }
1729 
1730   // Multiply the vectorization factor by the step using integer or
1731   // floating-point arithmetic as appropriate.
1732   Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF);
1733   Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
1734 
1735   // Create a vector splat to use in the induction update.
1736   //
1737   // FIXME: If the step is non-constant, we create the vector splat with
1738   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
1739   //        handle a constant vector splat.
1740   Value *SplatVF =
1741       isa<Constant>(Mul)
1742           ? ConstantVector::getSplat({VF, false}, cast<Constant>(Mul))
1743           : Builder.CreateVectorSplat(VF, Mul);
1744   Builder.restoreIP(CurrIP);
1745 
1746   // We may need to add the step a number of times, depending on the unroll
1747   // factor. The last of those goes into the PHI.
1748   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
1749                                     &*LoopVectorBody->getFirstInsertionPt());
1750   VecInd->setDebugLoc(EntryVal->getDebugLoc());
1751   Instruction *LastInduction = VecInd;
1752   for (unsigned Part = 0; Part < UF; ++Part) {
1753     VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);
1754 
1755     if (isa<TruncInst>(EntryVal))
1756       addMetadata(LastInduction, EntryVal);
1757     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part);
1758 
1759     LastInduction = cast<Instruction>(addFastMathFlag(
1760         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
1761     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
1762   }
1763 
1764   // Move the last step to the end of the latch block. This ensures consistent
1765   // placement of all induction updates.
1766   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
1767   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
1768   auto *ICmp = cast<Instruction>(Br->getCondition());
1769   LastInduction->moveBefore(ICmp);
1770   LastInduction->setName("vec.ind.next");
1771 
1772   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
1773   VecInd->addIncoming(LastInduction, LoopVectorLatch);
1774 }
1775 
1776 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
1777   return Cost->isScalarAfterVectorization(I, VF) ||
1778          Cost->isProfitableToScalarize(I, VF);
1779 }
1780 
1781 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
1782   if (shouldScalarizeInstruction(IV))
1783     return true;
1784   auto isScalarInst = [&](User *U) -> bool {
1785     auto *I = cast<Instruction>(U);
1786     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
1787   };
1788   return llvm::any_of(IV->users(), isScalarInst);
1789 }
1790 
1791 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
1792     const InductionDescriptor &ID, const Instruction *EntryVal,
1793     Value *VectorLoopVal, unsigned Part, unsigned Lane) {
1794   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1795          "Expected either an induction phi-node or a truncate of it!");
1796 
  // This induction variable is not the phi from the original loop but the
  // newly-created IV based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
  // reuses the same InductionDescriptor that the original IV uses, but we
  // don't have to do any recording in this case - that is done when the
  // original IV is processed.
1803   if (isa<TruncInst>(EntryVal))
1804     return;
1805 
1806   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
1807   if (Casts.empty())
1808     return;
1809   // Only the first Cast instruction in the Casts vector is of interest.
  // The rest of the Casts (if they exist) have no uses outside the
1811   // induction update chain itself.
1812   Instruction *CastInst = *Casts.begin();
1813   if (Lane < UINT_MAX)
1814     VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal);
1815   else
1816     VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal);
1817 }
1818 
1819 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
1820   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
1821          "Primary induction variable must have an integer type");
1822 
1823   auto II = Legal->getInductionVars().find(IV);
1824   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
1825 
1826   auto ID = II->second;
1827   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
1828 
1829   // The value from the original loop to which we are mapping the new induction
1830   // variable.
1831   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
1832 
1833   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
1834 
1835   // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
1837   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
1838     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
1839            "Induction step should be loop invariant");
1840     if (PSE.getSE()->isSCEVable(IV->getType())) {
1841       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
1842       return Exp.expandCodeFor(Step, Step->getType(),
1843                                LoopVectorPreHeader->getTerminator());
1844     }
1845     return cast<SCEVUnknown>(Step)->getValue();
1846   };
1847 
1848   // The scalar value to broadcast. This is derived from the canonical
1849   // induction variable. If a truncation type is given, truncate the canonical
1850   // induction variable and step. Otherwise, derive these values from the
1851   // induction descriptor.
1852   auto CreateScalarIV = [&](Value *&Step) -> Value * {
1853     Value *ScalarIV = Induction;
1854     if (IV != OldInduction) {
1855       ScalarIV = IV->getType()->isIntegerTy()
1856                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
1857                      : Builder.CreateCast(Instruction::SIToFP, Induction,
1858                                           IV->getType());
1859       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
1860       ScalarIV->setName("offset.idx");
1861     }
1862     if (Trunc) {
1863       auto *TruncType = cast<IntegerType>(Trunc->getType());
1864       assert(Step->getType()->isIntegerTy() &&
1865              "Truncation requires an integer step");
1866       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
1867       Step = Builder.CreateTrunc(Step, TruncType);
1868     }
1869     return ScalarIV;
1870   };
1871 
  // Create the vector values from the scalar IV, for the case in which we do
  // not create a vector IV.
1874   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
1875     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
1876     for (unsigned Part = 0; Part < UF; ++Part) {
1877       Value *EntryPart =
1878           getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode());
1879       VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart);
1880       if (Trunc)
1881         addMetadata(EntryPart, Trunc);
1882       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part);
1883     }
1884   };
1885 
1886   // Now do the actual transformations, and start with creating the step value.
1887   Value *Step = CreateStepValue(ID.getStep());
1888   if (VF <= 1) {
1889     Value *ScalarIV = CreateScalarIV(Step);
1890     CreateSplatIV(ScalarIV, Step);
1891     return;
1892   }
1893 
1894   // Determine if we want a scalar version of the induction variable. This is
1895   // true if the induction variable itself is not widened, or if it has at
1896   // least one user in the loop that is not widened.
1897   auto NeedsScalarIV = needsScalarInduction(EntryVal);
1898   if (!NeedsScalarIV) {
1899     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
1900     return;
1901   }
1902 
1903   // Try to create a new independent vector induction variable. If we can't
1904   // create the phi node, we will splat the scalar induction variable in each
1905   // loop iteration.
1906   if (!shouldScalarizeInstruction(EntryVal)) {
1907     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
1908     Value *ScalarIV = CreateScalarIV(Step);
1909     // Create scalar steps that can be used by instructions we will later
1910     // scalarize. Note that the addition of the scalar steps will not increase
1911     // the number of instructions in the loop in the common case prior to
1912     // InstCombine. We will be trading one vector extract for each scalar step.
1913     buildScalarSteps(ScalarIV, Step, EntryVal, ID);
1914     return;
1915   }
1916 
  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorized IV. Except when we tail-fold: then the splat IV feeds the
  // predicate used by the masked loads/stores.
1920   Value *ScalarIV = CreateScalarIV(Step);
1921   if (!Cost->isScalarEpilogueAllowed())
1922     CreateSplatIV(ScalarIV, Step);
1923   buildScalarSteps(ScalarIV, Step, EntryVal, ID);
1924 }
1925 
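// For example (illustrative), getStepVector(<%x, %x, %x, %x>, 0, 2, Add)
// produces <%x + 0, %x + 2, %x + 4, %x + 6>, i.e. lane i is advanced by
// (StartIdx + i) * Step.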
1926 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
1927                                           Instruction::BinaryOps BinOp) {
1928   // Create and check the types.
1929   auto *ValVTy = cast<VectorType>(Val->getType());
1930   int VLen = ValVTy->getNumElements();
1931 
1932   Type *STy = Val->getType()->getScalarType();
1933   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
1934          "Induction Step must be an integer or FP");
1935   assert(Step->getType() == STy && "Step has wrong type");
1936 
1937   SmallVector<Constant *, 8> Indices;
1938 
1939   if (STy->isIntegerTy()) {
    // Create a vector of consecutive numbers starting at StartIdx.
1941     for (int i = 0; i < VLen; ++i)
1942       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
1943 
1944     // Add the consecutive indices to the vector value.
1945     Constant *Cv = ConstantVector::get(Indices);
1946     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
1947     Step = Builder.CreateVectorSplat(VLen, Step);
1948     assert(Step->getType() == Val->getType() && "Invalid step vec");
1949     // FIXME: The newly created binary instructions should contain nsw/nuw flags,
1950     // which can be found from the original scalar operations.
1951     Step = Builder.CreateMul(Cv, Step);
1952     return Builder.CreateAdd(Val, Step, "induction");
1953   }
1954 
1955   // Floating point induction.
1956   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
1957          "Binary Opcode should be specified for FP induction");
  // Create a vector of consecutive numbers starting at StartIdx.
1959   for (int i = 0; i < VLen; ++i)
1960     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
1961 
1962   // Add the consecutive indices to the vector value.
1963   Constant *Cv = ConstantVector::get(Indices);
1964 
1965   Step = Builder.CreateVectorSplat(VLen, Step);
1966 
1967   // Floating point operations had to be 'fast' to enable the induction.
1968   FastMathFlags Flags;
1969   Flags.setFast();
1970 
1971   Value *MulOp = Builder.CreateFMul(Cv, Step);
1972   if (isa<Instruction>(MulOp))
1973     // Have to check, MulOp may be a constant
1974     cast<Instruction>(MulOp)->setFastMathFlags(Flags);
1975 
1976   Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
1977   if (isa<Instruction>(BOp))
1978     cast<Instruction>(BOp)->setFastMathFlags(Flags);
1979   return BOp;
1980 }
1981 
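// For example (illustrative), for a scalar IV %iv with step 1, VF = 4 and
// UF = 2, this emits the scalar values %iv + 0 .. %iv + 7, one per
// (unroll part, lane) pair; if the value is uniform, only lane zero of each
// part is created.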
1982 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
1983                                            Instruction *EntryVal,
1984                                            const InductionDescriptor &ID) {
1985   // We shouldn't have to build scalar steps if we aren't vectorizing.
1986   assert(VF > 1 && "VF should be greater than one");
1987 
  // Get the value type and ensure it and the step have the same type.
1989   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
1990   assert(ScalarIVTy == Step->getType() &&
1991          "Val and Step should have the same type");
1992 
1993   // We build scalar steps for both integer and floating-point induction
1994   // variables. Here, we determine the kind of arithmetic we will perform.
1995   Instruction::BinaryOps AddOp;
1996   Instruction::BinaryOps MulOp;
1997   if (ScalarIVTy->isIntegerTy()) {
1998     AddOp = Instruction::Add;
1999     MulOp = Instruction::Mul;
2000   } else {
2001     AddOp = ID.getInductionOpcode();
2002     MulOp = Instruction::FMul;
2003   }
2004 
2005   // Determine the number of scalars we need to generate for each unroll
2006   // iteration. If EntryVal is uniform, we only need to generate the first
2007   // lane. Otherwise, we generate all VF values.
2008   unsigned Lanes =
2009       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1
2010                                                                          : VF;
2011   // Compute the scalar steps and save the results in VectorLoopValueMap.
2012   for (unsigned Part = 0; Part < UF; ++Part) {
2013     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2014       auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
2015       auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
2016       auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
2017       VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add);
2018       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane);
2019     }
2020   }
2021 }
2022 
2023 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
2024   assert(V != Induction && "The new induction variable should not be used.");
2025   assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2026   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2027 
2028   // If we have a stride that is replaced by one, do it here. Defer this for
2029   // the VPlan-native path until we start running Legal checks in that path.
2030   if (!EnableVPlanNativePath && Legal->hasStride(V))
2031     V = ConstantInt::get(V->getType(), 1);
2032 
2033   // If we have a vector mapped to this value, return it.
2034   if (VectorLoopValueMap.hasVectorValue(V, Part))
2035     return VectorLoopValueMap.getVectorValue(V, Part);
2036 
2037   // If the value has not been vectorized, check if it has been scalarized
2038   // instead. If it has been scalarized, and we actually need the value in
2039   // vector form, we will construct the vector values on demand.
2040   if (VectorLoopValueMap.hasAnyScalarValue(V)) {
2041     Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0});
2042 
2043     // If we've scalarized a value, that value should be an instruction.
2044     auto *I = cast<Instruction>(V);
2045 
2046     // If we aren't vectorizing, we can just copy the scalar map values over to
2047     // the vector map.
2048     if (VF == 1) {
2049       VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
2050       return ScalarValue;
2051     }
2052 
2053     // Get the last scalar instruction we generated for V and Part. If the value
2054     // is known to be uniform after vectorization, this corresponds to lane zero
2055     // of the Part unroll iteration. Otherwise, the last instruction is the one
2056     // we created for the last vector lane of the Part unroll iteration.
2057     unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
2058     auto *LastInst = cast<Instruction>(
2059         VectorLoopValueMap.getScalarValue(V, {Part, LastLane}));
2060 
2061     // Set the insert point after the last scalarized instruction. This ensures
2062     // the insertelement sequence will directly follow the scalar definitions.
2063     auto OldIP = Builder.saveIP();
2064     auto NewIP = std::next(BasicBlock::iterator(LastInst));
2065     Builder.SetInsertPoint(&*NewIP);
2066 
2067     // However, if we are vectorizing, we need to construct the vector values.
2068     // If the value is known to be uniform after vectorization, we can just
2069     // broadcast the scalar value corresponding to lane zero for each unroll
2070     // iteration. Otherwise, we construct the vector values using insertelement
2071     // instructions. Since the resulting vectors are stored in
2072     // VectorLoopValueMap, we will only generate the insertelements once.
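    //
    // For example (illustrative), packing four scalar i32 definitions
    // %s0 .. %s3 for VF = 4 generates roughly:
    //   %v0 = insertelement <4 x i32> undef, i32 %s0, i32 0
    //   %v1 = insertelement <4 x i32> %v0, i32 %s1, i32 1
    //   %v2 = insertelement <4 x i32> %v1, i32 %s2, i32 2
    //   %v3 = insertelement <4 x i32> %v2, i32 %s3, i32 3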
2073     Value *VectorValue = nullptr;
2074     if (Cost->isUniformAfterVectorization(I, VF)) {
2075       VectorValue = getBroadcastInstrs(ScalarValue);
2076       VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
2077     } else {
2078       // Initialize packing with insertelements to start from undef.
2079       Value *Undef = UndefValue::get(FixedVectorType::get(V->getType(), VF));
2080       VectorLoopValueMap.setVectorValue(V, Part, Undef);
2081       for (unsigned Lane = 0; Lane < VF; ++Lane)
2082         packScalarIntoVectorValue(V, {Part, Lane});
2083       VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
2084     }
2085     Builder.restoreIP(OldIP);
2086     return VectorValue;
2087   }
2088 
2089   // If this scalar is unknown, assume that it is a constant or that it is
2090   // loop invariant. Broadcast V and save the value for future uses.
2091   Value *B = getBroadcastInstrs(V);
2092   VectorLoopValueMap.setVectorValue(V, Part, B);
2093   return B;
2094 }
2095 
2096 Value *
2097 InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
2098                                             const VPIteration &Instance) {
2099   // If the value is not an instruction contained in the loop, it should
2100   // already be scalar.
2101   if (OrigLoop->isLoopInvariant(V))
2102     return V;
2103 
  assert((Instance.Lane == 0 ||
          !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)) &&
         "Uniform values only have lane zero");
2107 
2108   // If the value from the original loop has not been vectorized, it is
2109   // represented by UF x VF scalar values in the new loop. Return the requested
2110   // scalar value.
2111   if (VectorLoopValueMap.hasScalarValue(V, Instance))
2112     return VectorLoopValueMap.getScalarValue(V, Instance);
2113 
2114   // If the value has not been scalarized, get its entry in VectorLoopValueMap
2115   // for the given unroll part. If this entry is not a vector type (i.e., the
2116   // vectorization factor is one), there is no need to generate an
2117   // extractelement instruction.
2118   auto *U = getOrCreateVectorValue(V, Instance.Part);
2119   if (!U->getType()->isVectorTy()) {
2120     assert(VF == 1 && "Value not scalarized has non-vector type");
2121     return U;
2122   }
2123 
2124   // Otherwise, the value from the original loop has been vectorized and is
2125   // represented by UF vector values. Extract and return the requested scalar
2126   // value from the appropriate vector lane.
2127   return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
2128 }
2129 
2130 void InnerLoopVectorizer::packScalarIntoVectorValue(
2131     Value *V, const VPIteration &Instance) {
2132   assert(V != Induction && "The new induction variable should not be used.");
2133   assert(!V->getType()->isVectorTy() && "Can't pack a vector");
2134   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2135 
2136   Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
2137   Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
2138   VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
2139                                             Builder.getInt32(Instance.Lane));
2140   VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
2141 }
2142 
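// For example (illustrative), with VF = 4 reverseVector turns <a, b, c, d>
// into <d, c, b, a> using a shufflevector with mask <3, 2, 1, 0>.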
2143 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2144   assert(Vec->getType()->isVectorTy() && "Invalid type");
2145   SmallVector<int, 8> ShuffleMask;
2146   for (unsigned i = 0; i < VF; ++i)
2147     ShuffleMask.push_back(VF - i - 1);
2148 
2149   return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
2150                                      ShuffleMask, "reverse");
2151 }
2152 
2153 // Return whether we allow using masked interleave-groups (for dealing with
2154 // strided loads/stores that reside in predicated blocks, or for dealing
2155 // with gaps).
2156 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2157   // If an override option has been passed in for interleaved accesses, use it.
2158   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2159     return EnableMaskedInterleavedMemAccesses;
2160 
2161   return TTI.enableMaskedInterleavedAccessVectorization();
2162 }
2163 
2164 // Try to vectorize the interleave group that \p Instr belongs to.
2165 //
2166 // E.g. Translate following interleaved load group (factor = 3):
2167 //   for (i = 0; i < N; i+=3) {
2168 //     R = Pic[i];             // Member of index 0
2169 //     G = Pic[i+1];           // Member of index 1
2170 //     B = Pic[i+2];           // Member of index 2
2171 //     ... // do something to R, G, B
2172 //   }
2173 // To:
2174 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2175 //   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
2176 //   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
2177 //   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
2178 //
2179 // Or translate following interleaved store group (factor = 3):
2180 //   for (i = 0; i < N; i+=3) {
2181 //     ... do something to R, G, B
2182 //     Pic[i]   = R;           // Member of index 0
2183 //     Pic[i+1] = G;           // Member of index 1
2184 //     Pic[i+2] = B;           // Member of index 2
2185 //   }
2186 // To:
2187 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2188 //   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2189 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2190 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2191 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2192 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2193     const InterleaveGroup<Instruction> *Group, VPTransformState &State,
2194     VPValue *Addr, VPValue *BlockInMask) {
2195   Instruction *Instr = Group->getInsertPos();
2196   const DataLayout &DL = Instr->getModule()->getDataLayout();
2197 
2198   // Prepare for the vector type of the interleaved load/store.
2199   Type *ScalarTy = getMemInstValueType(Instr);
2200   unsigned InterleaveFactor = Group->getFactor();
2201   auto *VecTy = FixedVectorType::get(ScalarTy, InterleaveFactor * VF);
2202 
2203   // Prepare for the new pointers.
2204   SmallVector<Value *, 2> AddrParts;
2205   unsigned Index = Group->getIndex(Instr);
2206 
2207   // TODO: extend the masked interleaved-group support to reversed access.
2208   assert((!BlockInMask || !Group->isReverse()) &&
2209          "Reversed masked interleave-group not supported.");
2210 
2211   // If the group is reverse, adjust the index to refer to the last vector lane
2212   // instead of the first. We adjust the index from the first vector lane,
2213   // rather than directly getting the pointer for lane VF - 1, because the
2214   // pointer operand of the interleaved access is supposed to be uniform. For
2215   // uniform instructions, we're only required to generate a value for the
2216   // first vector lane in each unroll iteration.
2217   if (Group->isReverse())
2218     Index += (VF - 1) * Group->getFactor();
2219 
2220   for (unsigned Part = 0; Part < UF; Part++) {
2221     Value *AddrPart = State.get(Addr, {Part, 0});
2222     setDebugLocFromInst(Builder, AddrPart);
2223 
    // Note that the current instruction could be at any index in the group.
    // We need to adjust the address to the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2235 
2236     bool InBounds = false;
2237     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2238       InBounds = gep->isInBounds();
2239     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2240     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2241 
2242     // Cast to the vector pointer type.
2243     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2244     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2245     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2246   }
2247 
2248   setDebugLocFromInst(Builder, Instr);
2249   Value *UndefVec = UndefValue::get(VecTy);
2250 
2251   Value *MaskForGaps = nullptr;
2252   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2253     MaskForGaps = createBitMaskForGaps(Builder, VF, *Group);
2254     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2255   }
2256 
2257   // Vectorize the interleaved load group.
2258   if (isa<LoadInst>(Instr)) {
2259     // For each unroll part, create a wide load for the group.
2260     SmallVector<Value *, 2> NewLoads;
2261     for (unsigned Part = 0; Part < UF; Part++) {
2262       Instruction *NewLoad;
2263       if (BlockInMask || MaskForGaps) {
2264         assert(useMaskedInterleavedAccesses(*TTI) &&
2265                "masked interleaved groups are not allowed.");
2266         Value *GroupMask = MaskForGaps;
2267         if (BlockInMask) {
2268           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2269           auto *Undefs = UndefValue::get(BlockInMaskPart->getType());
2270           Value *ShuffledMask = Builder.CreateShuffleVector(
2271               BlockInMaskPart, Undefs,
2272               createReplicatedMask(InterleaveFactor, VF), "interleaved.mask");
2273           GroupMask = MaskForGaps
2274                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2275                                                 MaskForGaps)
2276                           : ShuffledMask;
2277         }
2278         NewLoad =
2279             Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2280                                      GroupMask, UndefVec, "wide.masked.vec");
      } else
2283         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2284                                             Group->getAlign(), "wide.vec");
2285       Group->addMetadata(NewLoad);
2286       NewLoads.push_back(NewLoad);
2287     }
2288 
2289     // For each member in the group, shuffle out the appropriate data from the
2290     // wide loads.
2291     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2292       Instruction *Member = Group->getMember(I);
2293 
2294       // Skip the gaps in the group.
2295       if (!Member)
2296         continue;
2297 
2298       auto StrideMask = createStrideMask(I, InterleaveFactor, VF);
2299       for (unsigned Part = 0; Part < UF; Part++) {
2300         Value *StridedVec = Builder.CreateShuffleVector(
2301             NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2302 
        // If this member has a different type, cast the result to that type.
2304         if (Member->getType() != ScalarTy) {
2305           VectorType *OtherVTy = FixedVectorType::get(Member->getType(), VF);
2306           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2307         }
2308 
2309         if (Group->isReverse())
2310           StridedVec = reverseVector(StridedVec);
2311 
2312         VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
2313       }
2314     }
2315     return;
2316   }
2317 
  // The subvector type for the current instruction.
2319   auto *SubVT = FixedVectorType::get(ScalarTy, VF);
2320 
2321   // Vectorize the interleaved store group.
2322   for (unsigned Part = 0; Part < UF; Part++) {
2323     // Collect the stored vector from each member.
2324     SmallVector<Value *, 4> StoredVecs;
2325     for (unsigned i = 0; i < InterleaveFactor; i++) {
      // Interleaved store groups don't allow gaps, so each index has a member.
2327       Instruction *Member = Group->getMember(i);
2328       assert(Member && "Fail to get a member from an interleaved store group");
2329 
2330       Value *StoredVec = getOrCreateVectorValue(
2331           cast<StoreInst>(Member)->getValueOperand(), Part);
2332       if (Group->isReverse())
2333         StoredVec = reverseVector(StoredVec);
2334 
      // If this member has a different type, cast it to a unified type.
2337       if (StoredVec->getType() != SubVT)
2338         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2339 
2340       StoredVecs.push_back(StoredVec);
2341     }
2342 
2343     // Concatenate all vectors into a wide vector.
2344     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2345 
2346     // Interleave the elements in the wide vector.
2347     Value *IVec = Builder.CreateShuffleVector(
2348         WideVec, UndefVec, createInterleaveMask(VF, InterleaveFactor),
2349         "interleaved.vec");
2350 
2351     Instruction *NewStoreInstr;
2352     if (BlockInMask) {
2353       Value *BlockInMaskPart = State.get(BlockInMask, Part);
2354       auto *Undefs = UndefValue::get(BlockInMaskPart->getType());
2355       Value *ShuffledMask = Builder.CreateShuffleVector(
2356           BlockInMaskPart, Undefs, createReplicatedMask(InterleaveFactor, VF),
2357           "interleaved.mask");
2358       NewStoreInstr = Builder.CreateMaskedStore(
2359           IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
    } else
2362       NewStoreInstr =
2363           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2364 
2365     Group->addMetadata(NewStoreInstr);
2366   }
2367 }
2368 
2369 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
2370                                                      VPTransformState &State,
2371                                                      VPValue *Addr,
2372                                                      VPValue *StoredValue,
2373                                                      VPValue *BlockInMask) {
  // Attempt to issue a wide load or store.
2375   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2376   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2377 
2378   assert((LI || SI) && "Invalid Load/Store instruction");
2379   assert((!SI || StoredValue) && "No stored value provided for widened store");
2380   assert((!LI || !StoredValue) && "Stored value provided for widened load");
2381 
2382   LoopVectorizationCostModel::InstWidening Decision =
2383       Cost->getWideningDecision(Instr, VF);
2384   assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2385           Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2386           Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2387          "CM decision is not to widen the memory instruction");
2388 
2389   Type *ScalarDataTy = getMemInstValueType(Instr);
2390   auto *DataTy = FixedVectorType::get(ScalarDataTy, VF);
2391   const Align Alignment = getLoadStoreAlignment(Instr);
2392 
2393   // Determine if the pointer operand of the access is either consecutive or
2394   // reverse consecutive.
2395   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2396   bool ConsecutiveStride =
2397       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2398   bool CreateGatherScatter =
2399       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2400 
2401   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2402   // gather/scatter. Otherwise Decision should have been to Scalarize.
2403   assert((ConsecutiveStride || CreateGatherScatter) &&
2404          "The instruction should be scalarized");
2405   (void)ConsecutiveStride;
2406 
2407   VectorParts BlockInMaskParts(UF);
2408   bool isMaskRequired = BlockInMask;
2409   if (isMaskRequired)
2410     for (unsigned Part = 0; Part < UF; ++Part)
2411       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2412 
2413   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2414     // Calculate the pointer for the specific unroll-part.
2415     GetElementPtrInst *PartPtr = nullptr;
2416 
2417     bool InBounds = false;
2418     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2419       InBounds = gep->isInBounds();
2420 
2421     if (Reverse) {
      // If the address is consecutive but reversed, then the
      // wide load/store needs to start at the last vector element.
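      // For example (illustrative): with VF = 4 and Part = 1, the resulting
      // pointer is Ptr - 7, so the 4-element wide access covers elements
      // [-7, -4] relative to Ptr.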
2424       PartPtr = cast<GetElementPtrInst>(
2425           Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(-Part * VF)));
2426       PartPtr->setIsInBounds(InBounds);
2427       PartPtr = cast<GetElementPtrInst>(
2428           Builder.CreateGEP(ScalarDataTy, PartPtr, Builder.getInt32(1 - VF)));
2429       PartPtr->setIsInBounds(InBounds);
2430       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2431         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2432     } else {
2433       PartPtr = cast<GetElementPtrInst>(
2434           Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(Part * VF)));
2435       PartPtr->setIsInBounds(InBounds);
2436     }
2437 
2438     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2439     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2440   };
2441 
2442   // Handle Stores:
2443   if (SI) {
2444     setDebugLocFromInst(Builder, SI);
2445 
2446     for (unsigned Part = 0; Part < UF; ++Part) {
2447       Instruction *NewSI = nullptr;
2448       Value *StoredVal = State.get(StoredValue, Part);
2449       if (CreateGatherScatter) {
2450         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2451         Value *VectorGep = State.get(Addr, Part);
2452         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2453                                             MaskPart);
2454       } else {
2455         if (Reverse) {
2456           // If we store to reverse consecutive memory locations, then we need
2457           // to reverse the order of elements in the stored value.
2458           StoredVal = reverseVector(StoredVal);
2459           // We don't want to update the value in the map as it might be used in
2460           // another expression. So don't call resetVectorValue(StoredVal).
2461         }
2462         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
2463         if (isMaskRequired)
2464           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2465                                             BlockInMaskParts[Part]);
2466         else
2467           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2468       }
2469       addMetadata(NewSI, SI);
2470     }
2471     return;
2472   }
2473 
2474   // Handle loads.
2475   assert(LI && "Must have a load instruction");
2476   setDebugLocFromInst(Builder, LI);
2477   for (unsigned Part = 0; Part < UF; ++Part) {
2478     Value *NewLI;
2479     if (CreateGatherScatter) {
2480       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2481       Value *VectorGep = State.get(Addr, Part);
2482       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2483                                          nullptr, "wide.masked.gather");
2484       addMetadata(NewLI, LI);
2485     } else {
2486       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
2487       if (isMaskRequired)
2488         NewLI = Builder.CreateMaskedLoad(
2489             VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy),
2490             "wide.masked.load");
2491       else
2492         NewLI =
2493             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
2494 
2495       // Add metadata to the load, but setVectorValue to the reverse shuffle.
2496       addMetadata(NewLI, LI);
2497       if (Reverse)
2498         NewLI = reverseVector(NewLI);
2499     }
2500     VectorLoopValueMap.setVectorValue(Instr, Part, NewLI);
2501   }
2502 }
2503 
2504 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPUser &User,
2505                                                const VPIteration &Instance,
2506                                                bool IfPredicateInstr,
2507                                                VPTransformState &State) {
2508   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2509 
2510   setDebugLocFromInst(Builder, Instr);
2511 
  // Does this instruction return a value?
2513   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2514 
2515   Instruction *Cloned = Instr->clone();
2516   if (!IsVoidRetTy)
2517     Cloned->setName(Instr->getName() + ".cloned");
2518 
2519   // Replace the operands of the cloned instructions with their scalar
2520   // equivalents in the new loop.
2521   for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
2522     auto *NewOp = State.get(User.getOperand(op), Instance);
2523     Cloned->setOperand(op, NewOp);
2524   }
2525   addNewMetadata(Cloned, Instr);
2526 
2527   // Place the cloned scalar in the new loop.
2528   Builder.Insert(Cloned);
2529 
2530   // Add the cloned scalar to the scalar map entry.
2531   VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
2532 
  // If we just cloned a new assumption, add it to the assumption cache.
2534   if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2535     if (II->getIntrinsicID() == Intrinsic::assume)
2536       AC->registerAssumption(II);
2537 
2538   // End if-block.
2539   if (IfPredicateInstr)
2540     PredicatedInstructions.push_back(Cloned);
2541 }
2542 
2543 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2544                                                       Value *End, Value *Step,
2545                                                       Instruction *DL) {
2546   BasicBlock *Header = L->getHeader();
2547   BasicBlock *Latch = L->getLoopLatch();
2548   // As we're just creating this loop, it's possible no latch exists
2549   // yet. If so, use the header as this will be a single block loop.
2550   if (!Latch)
2551     Latch = Header;
2552 
2553   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2554   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
2555   setDebugLocFromInst(Builder, OldInst);
2556   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2557 
2558   Builder.SetInsertPoint(Latch->getTerminator());
2559   setDebugLocFromInst(Builder, OldInst);
2560 
2561   // Create i+1 and fill the PHINode.
2562   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2563   Induction->addIncoming(Start, L->getLoopPreheader());
2564   Induction->addIncoming(Next, Latch);
2565   // Create the compare.
2566   Value *ICmp = Builder.CreateICmpEQ(Next, End);
2567   Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
2568 
2569   // Now we have two terminators. Remove the old one from the block.
2570   Latch->getTerminator()->eraseFromParent();
2571 
2572   return Induction;
2573 }
2574 
2575 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2576   if (TripCount)
2577     return TripCount;
2578 
2579   assert(L && "Create Trip Count for null loop.");
2580   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2581   // Find the loop boundaries.
2582   ScalarEvolution *SE = PSE.getSE();
2583   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2584   assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
2585          "Invalid loop count");
2586 
2587   Type *IdxTy = Legal->getWidestInductionType();
2588   assert(IdxTy && "No type for induction");
2589 
  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way we can get a backedge-taken count in that case is if
  // the induction variable was signed, and as such does not overflow, so the
  // truncation is legal.
2595   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
2596       IdxTy->getPrimitiveSizeInBits())
2597     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2598   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2599 
2600   // Get the total trip count from the count by adding 1.
2601   const SCEV *ExitCount = SE->getAddExpr(
2602       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2603 
2604   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2605 
2606   // Expand the trip count and place the new instructions in the preheader.
2607   // Notice that the pre-header does not change, only the loop body.
2608   SCEVExpander Exp(*SE, DL, "induction");
2609 
2610   // Count holds the overall loop count (N).
2611   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2612                                 L->getLoopPreheader()->getTerminator());
2613 
2614   if (TripCount->getType()->isPointerTy())
2615     TripCount =
2616         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2617                                     L->getLoopPreheader()->getTerminator());
2618 
2619   return TripCount;
2620 }
2621 
2622 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
2623   if (VectorTripCount)
2624     return VectorTripCount;
2625 
2626   Value *TC = getOrCreateTripCount(L);
2627   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2628 
2629   Type *Ty = TC->getType();
2630   Constant *Step = ConstantInt::get(Ty, VF * UF);
2631 
2632   // If the tail is to be folded by masking, round the number of iterations N
2633   // up to a multiple of Step instead of rounding down. This is done by first
2634   // adding Step-1 and then rounding down. Note that it's ok if this addition
2635   // overflows: the vector induction variable will eventually wrap to zero given
2636   // that it starts at zero and its Step is a power of two; the loop will then
2637   // exit, with the last early-exit vector comparison also producing all-true.
2638   if (Cost->foldTailByMasking()) {
2639     assert(isPowerOf2_32(VF * UF) &&
2640            "VF*UF must be a power of 2 when folding tail by masking");
2641     TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up");
2642   }
2643 
2644   // Now we need to generate the expression for the part of the loop that the
2645   // vectorized body will execute. This is equal to N - (N % Step) if scalar
2646   // iterations are not required for correctness, or N - Step, otherwise. Step
2647   // is equal to the vectorization factor (number of SIMD elements) times the
2648   // unroll factor (number of SIMD instructions).
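  // For example (illustrative): with N = 10 and Step = VF * UF = 4, R = 2 and
  // the vector loop covers 8 iterations. With tail folding, N was rounded up
  // to 13 above, giving R = 1 and a vector trip count of 12 (the extra lanes
  // are masked off).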
2649   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2650 
2651   // If there is a non-reversed interleaved group that may speculatively access
2652   // memory out-of-bounds, we need to ensure that there will be at least one
2653   // iteration of the scalar epilogue loop. Thus, if the step evenly divides
2654   // the trip count, we set the remainder to be equal to the step. If the step
2655   // does not evenly divide the trip count, no adjustment is necessary since
2656   // there will already be scalar iterations. Note that the minimum iterations
2657   // check ensures that N >= Step.
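  // For example (illustrative): with N = 12 and Step = 4, R would be 0 and is
  // bumped to 4, so the vector loop covers 8 iterations and 4 remain for the
  // scalar epilogue.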
2658   if (VF > 1 && Cost->requiresScalarEpilogue()) {
2659     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2660     R = Builder.CreateSelect(IsZero, Step, R);
2661   }
2662 
2663   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2664 
2665   return VectorTripCount;
2666 }
2667 
2668 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2669                                                    const DataLayout &DL) {
  // Verify that V is a vector with the same number of elements as DstVTy.
2671   unsigned VF = DstVTy->getNumElements();
2672   VectorType *SrcVecTy = cast<VectorType>(V->getType());
2673   assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
2674   Type *SrcElemTy = SrcVecTy->getElementType();
2675   Type *DstElemTy = DstVTy->getElementType();
2676   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2677          "Vector elements must have same size");
2678 
2679   // Do a direct cast if element types are castable.
2680   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2681     return Builder.CreateBitOrPointerCast(V, DstVTy);
2682   }
  // V cannot be directly cast to the desired vector type. This may happen
  // when V is a floating point vector but DstVTy is a vector of pointers, or
  // vice-versa. Handle this with a two-step bitcast through an intermediate
  // integer type, i.e. Ptr <-> Int <-> Float.
2687   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2688          "Only one type should be a pointer type");
2689   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2690          "Only one type should be a floating point type");
2691   Type *IntTy =
2692       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2693   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
2694   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2695   return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
2696 }
2697 
2698 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2699                                                          BasicBlock *Bypass) {
2700   Value *Count = getOrCreateTripCount(L);
2701   // Reuse existing vector loop preheader for TC checks.
2702   // Note that new preheader block is generated for vector loop.
2703   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
2704   IRBuilder<> Builder(TCCheckBlock->getTerminator());
2705 
2706   // Generate code to check if the loop's trip count is less than VF * UF, or
2707   // equal to it in case a scalar epilogue is required; this implies that the
2708   // vector trip count is zero. This check also covers the case where adding one
2709   // to the backedge-taken count overflowed leading to an incorrect trip count
2710   // of zero. In this case we will also jump to the scalar loop.
2711   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
2712                                           : ICmpInst::ICMP_ULT;
2713 
2714   // If tail is to be folded, vector loop takes care of all iterations.
2715   Value *CheckMinIters = Builder.getFalse();
2716   if (!Cost->foldTailByMasking())
2717     CheckMinIters = Builder.CreateICmp(
2718         P, Count, ConstantInt::get(Count->getType(), VF * UF),
2719         "min.iters.check");
2720 
2721   // Create new preheader for vector loop.
2722   LoopVectorPreHeader =
2723       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
2724                  "vector.ph");
2725 
2726   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
2727                                DT->getNode(Bypass)->getIDom()) &&
2728          "TC check is expected to dominate Bypass");
2729 
2730   // Update dominator for Bypass & LoopExit.
2731   DT->changeImmediateDominator(Bypass, TCCheckBlock);
2732   DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
2733 
2734   ReplaceInstWithInst(
2735       TCCheckBlock->getTerminator(),
2736       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
2737   LoopBypassBlocks.push_back(TCCheckBlock);
2738 }
2739 
2740 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
2741   // Reuse existing vector loop preheader for SCEV checks.
2742   // Note that new preheader block is generated for vector loop.
2743   BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader;
2744 
  // Generate the code to check the SCEV assumptions that we made.
2746   // We want the new basic block to start at the first instruction in a
2747   // sequence of instructions that form a check.
2748   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
2749                    "scev.check");
2750   Value *SCEVCheck = Exp.expandCodeForPredicate(
2751       &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator());
2752 
2753   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
2754     if (C->isZero())
2755       return;
2756 
2757   assert(!SCEVCheckBlock->getParent()->hasOptSize() &&
2758          "Cannot SCEV check stride or overflow when optimizing for size");
2759 
2760   SCEVCheckBlock->setName("vector.scevcheck");
2761   // Create new preheader for vector loop.
2762   LoopVectorPreHeader =
2763       SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI,
2764                  nullptr, "vector.ph");
2765 
2766   // Update dominator only if this is first RT check.
2767   if (LoopBypassBlocks.empty()) {
2768     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
2769     DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
2770   }
2771 
2772   ReplaceInstWithInst(
2773       SCEVCheckBlock->getTerminator(),
2774       BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck));
2775   LoopBypassBlocks.push_back(SCEVCheckBlock);
2776   AddedSafetyChecks = true;
2777 }
2778 
2779 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
2780   // VPlan-native path does not do any analysis for runtime checks currently.
2781   if (EnableVPlanNativePath)
2782     return;
2783 
2784   // Reuse existing vector loop preheader for runtime memory checks.
2785   // Note that new preheader block is generated for vector loop.
2786   BasicBlock *const MemCheckBlock = L->getLoopPreheader();
2787 
  // Generate the code that checks at runtime whether the arrays overlap. We
  // put the checks into a separate block to make the more common case of few
  // elements faster.
2791   auto *LAI = Legal->getLAI();
2792   const auto &RtPtrChecking = *LAI->getRuntimePointerChecking();
2793   if (!RtPtrChecking.Need)
2794     return;
2795   Instruction *FirstCheckInst;
2796   Instruction *MemRuntimeCheck;
2797   std::tie(FirstCheckInst, MemRuntimeCheck) =
2798       addRuntimeChecks(MemCheckBlock->getTerminator(), OrigLoop,
2799                        RtPtrChecking.getChecks(), RtPtrChecking.getSE());
2800   assert(MemRuntimeCheck && "no RT checks generated although RtPtrChecking "
2801                             "claimed checks are required");
2802 
2803   if (MemCheckBlock->getParent()->hasOptSize()) {
2804     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
2805            "Cannot emit memory checks when optimizing for size, unless forced "
2806            "to vectorize.");
2807     ORE->emit([&]() {
2808       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
2809                                         L->getStartLoc(), L->getHeader())
2810              << "Code-size may be reduced by not forcing "
2811                 "vectorization, or by source-code modifications "
2812                 "eliminating the need for runtime checks "
2813                 "(e.g., adding 'restrict').";
2814     });
2815   }
2816 
2817   MemCheckBlock->setName("vector.memcheck");
2818   // Create new preheader for vector loop.
2819   LoopVectorPreHeader =
2820       SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr,
2821                  "vector.ph");
2822 
2823   // Update dominator only if this is first RT check.
2824   if (LoopBypassBlocks.empty()) {
2825     DT->changeImmediateDominator(Bypass, MemCheckBlock);
2826     DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock);
2827   }
2828 
2829   ReplaceInstWithInst(
2830       MemCheckBlock->getTerminator(),
2831       BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheck));
2832   LoopBypassBlocks.push_back(MemCheckBlock);
2833   AddedSafetyChecks = true;
2834 
2835   // We currently don't use LoopVersioning for the actual loop cloning but we
2836   // still use it to add the noalias metadata.
2837   LVer = std::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
2838                                           PSE.getSE());
2839   LVer->prepareNoAliasMetadata();
2840 }
2841 
2842 Value *InnerLoopVectorizer::emitTransformedIndex(
2843     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
2844     const InductionDescriptor &ID) const {
2845 
2846   SCEVExpander Exp(*SE, DL, "induction");
2847   auto Step = ID.getStep();
2848   auto StartValue = ID.getStartValue();
2849   assert(Index->getType() == Step->getType() &&
2850          "Index type does not match StepValue type");
2851 
  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and rely
  // on InstCombine for future simplifications. Here we handle only some
  // trivial cases.
2858   auto CreateAdd = [&B](Value *X, Value *Y) {
2859     assert(X->getType() == Y->getType() && "Types don't match!");
2860     if (auto *CX = dyn_cast<ConstantInt>(X))
2861       if (CX->isZero())
2862         return Y;
2863     if (auto *CY = dyn_cast<ConstantInt>(Y))
2864       if (CY->isZero())
2865         return X;
2866     return B.CreateAdd(X, Y);
2867   };
2868 
2869   auto CreateMul = [&B](Value *X, Value *Y) {
2870     assert(X->getType() == Y->getType() && "Types don't match!");
2871     if (auto *CX = dyn_cast<ConstantInt>(X))
2872       if (CX->isOne())
2873         return Y;
2874     if (auto *CY = dyn_cast<ConstantInt>(Y))
2875       if (CY->isOne())
2876         return X;
2877     return B.CreateMul(X, Y);
2878   };
2879 
2880   // Get a suitable insert point for SCEV expansion. For blocks in the vector
2881   // loop, choose the end of the vector loop header (=LoopVectorBody), because
2882   // the DomTree is not kept up-to-date for additional blocks generated in the
2883   // vector loop. By using the header as insertion point, we guarantee that the
2884   // expanded instructions dominate all their uses.
2885   auto GetInsertPoint = [this, &B]() {
2886     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
2887     if (InsertBB != LoopVectorBody &&
2888         LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
2889       return LoopVectorBody->getTerminator();
2890     return &*B.GetInsertPoint();
2891   };
2892   switch (ID.getKind()) {
2893   case InductionDescriptor::IK_IntInduction: {
2894     assert(Index->getType() == StartValue->getType() &&
2895            "Index type does not match StartValue type");
2896     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
2897       return B.CreateSub(StartValue, Index);
2898     auto *Offset = CreateMul(
2899         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
2900     return CreateAdd(StartValue, Offset);
2901   }
2902   case InductionDescriptor::IK_PtrInduction: {
2903     assert(isa<SCEVConstant>(Step) &&
2904            "Expected constant step for pointer induction");
2905     return B.CreateGEP(
2906         StartValue->getType()->getPointerElementType(), StartValue,
2907         CreateMul(Index,
2908                   Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())));
2909   }
2910   case InductionDescriptor::IK_FpInduction: {
2911     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2912     auto InductionBinOp = ID.getInductionBinOp();
2913     assert(InductionBinOp &&
2914            (InductionBinOp->getOpcode() == Instruction::FAdd ||
2915             InductionBinOp->getOpcode() == Instruction::FSub) &&
2916            "Original bin op should be defined for FP induction");
2917 
2918     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
2919 
2920     // Floating point operations had to be 'fast' to enable the induction.
2921     FastMathFlags Flags;
2922     Flags.setFast();
2923 
2924     Value *MulExp = B.CreateFMul(StepValue, Index);
2925     if (isa<Instruction>(MulExp))
      // We have to check because MulExp may be a constant.
2927       cast<Instruction>(MulExp)->setFastMathFlags(Flags);
2928 
2929     Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2930                                "induction");
2931     if (isa<Instruction>(BOp))
2932       cast<Instruction>(BOp)->setFastMathFlags(Flags);
2933 
2934     return BOp;
2935   }
2936   case InductionDescriptor::IK_NoInduction:
2937     return nullptr;
2938   }
2939   llvm_unreachable("invalid enum");
2940 }
2941 
2942 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
2943   /*
2944    In this function we generate a new loop. The new loop will contain
2945    the vectorized instructions while the old loop will continue to run the
2946    scalar remainder.
2947 
2948        [ ] <-- loop iteration number check.
2949     /   |
2950    /    v
2951   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
2952   |  /  |
2953   | /   v
2954   ||   [ ]     <-- vector pre header.
2955   |/    |
2956   |     v
2957   |    [  ] \
2958   |    [  ]_|   <-- vector loop.
2959   |     |
2960   |     v
2961   |   -[ ]   <--- middle-block.
2962   |  /  |
2963   | /   v
2964   -|- >[ ]     <--- new preheader.
2965    |    |
2966    |    v
2967    |   [ ] \
2968    |   [ ]_|   <-- old scalar loop to handle remainder.
2969     \   |
2970      \  v
2971       >[ ]     <-- exit block.
2972    ...
2973    */
2974 
2975   MDNode *OrigLoopID = OrigLoop->getLoopID();
2976 
  // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators, which often result in multiple
  // pointer induction variables. The code below also supports the case where
  // no single induction variable exists.
  //
  // We try as hard as possible to obtain an induction variable from the
  // original loop. However, if we don't find one that:
  //   - is an integer
  //   - counts from zero, stepping by one
  //   - is the size of the widest induction variable type
  // then we create a new one.
2988   OldInduction = Legal->getPrimaryInduction();
2989   Type *IdxTy = Legal->getWidestInductionType();
2990 
2991   // Split the single block loop into the two loop structure described above.
2992   LoopScalarBody = OrigLoop->getHeader();
2993   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
2994   LoopExitBlock = OrigLoop->getExitBlock();
2995   assert(LoopExitBlock && "Must have an exit block");
2996   assert(LoopVectorPreHeader && "Invalid loop structure");
2997 
2998   LoopMiddleBlock =
2999       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3000                  LI, nullptr, "middle.block");
3001   LoopScalarPreHeader =
3002       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3003                  nullptr, "scalar.ph");
  // We intentionally don't let SplitBlock update LoopInfo since LoopVectorBody
  // should belong to a different loop than LoopVectorPreHeader. LoopVectorBody
  // is explicitly added to the correct place a few lines later.
3007   LoopVectorBody =
3008       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3009                  nullptr, nullptr, "vector.body");
3010 
3011   // Update dominator for loop exit.
3012   DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3013 
3014   // Create and register the new vector loop.
3015   Loop *Lp = LI->AllocateLoop();
3016   Loop *ParentLoop = OrigLoop->getParentLoop();
3017 
3018   // Insert the new loop into the loop nest and register the new basic blocks
3019   // before calling any utilities such as SCEV that require valid LoopInfo.
3020   if (ParentLoop) {
3021     ParentLoop->addChildLoop(Lp);
3022   } else {
3023     LI->addTopLevelLoop(Lp);
3024   }
3025   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3026 
3027   // Find the loop boundaries.
3028   Value *Count = getOrCreateTripCount(Lp);
3029 
3030   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3031 
3032   // Now, compare the new count to zero. If it is zero skip the vector loop and
3033   // jump to the scalar loop. This check also covers the case where the
3034   // backedge-taken count is uint##_max: adding one to it will overflow leading
3035   // to an incorrect trip count of zero. In this (rare) case we will also jump
3036   // to the scalar loop.
3037   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3038 
3039   // Generate the code to check any assumptions that we've made for SCEV
3040   // expressions.
3041   emitSCEVChecks(Lp, LoopScalarPreHeader);
3042 
  // Generate the code that checks at runtime whether the arrays overlap. We
  // put the checks into a separate block to make the more common case of few
  // elements faster.
3046   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3047 
3048   // Generate the induction variable.
3049   // The loop step is equal to the vectorization factor (num of SIMD elements)
3050   // times the unroll factor (num of SIMD instructions).
3051   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3052   Constant *Step = ConstantInt::get(IdxTy, VF * UF);
3053   Induction =
3054       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3055                               getDebugLocFromInstOrOperands(OldInduction));
3056 
3057   // We are going to resume the execution of the scalar loop.
3058   // Go over all of the induction variables that we found and fix the
3059   // PHIs that are left in the scalar version of the loop.
3060   // The starting values of PHI nodes depend on the counter of the last
3061   // iteration in the vectorized loop.
3062   // If we come from a bypass edge then we need to start from the original
3063   // start value.
3064 
3065   // This variable saves the new starting index for the scalar loop. It is used
3066   // to test if there are any tail iterations left once the vector loop has
3067   // completed.
3068   for (auto &InductionEntry : Legal->getInductionVars()) {
3069     PHINode *OrigPhi = InductionEntry.first;
3070     InductionDescriptor II = InductionEntry.second;
3071 
    // Create phi nodes to merge from the backedge-taken check block.
3073     PHINode *BCResumeVal =
3074         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3075                         LoopScalarPreHeader->getTerminator());
3076     // Copy original phi DL over to the new one.
3077     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3078     Value *&EndValue = IVEndValues[OrigPhi];
3079     if (OrigPhi == OldInduction) {
3080       // We know what the end value is.
3081       EndValue = CountRoundDown;
3082     } else {
3083       IRBuilder<> B(Lp->getLoopPreheader()->getTerminator());
3084       Type *StepType = II.getStep()->getType();
3085       Instruction::CastOps CastOp =
3086           CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
3087       Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
3088       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3089       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3090       EndValue->setName("ind.end");
3091     }
3092 
3093     // The new PHI merges the original incoming value, in case of a bypass,
3094     // or the value at the end of the vectorized loop.
3095     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3096 
3097     // Fix the scalar body counter (PHI node).
3098     // The old induction's phi node in the scalar body needs the truncated
3099     // value.
3100     for (BasicBlock *BB : LoopBypassBlocks)
3101       BCResumeVal->addIncoming(II.getStartValue(), BB);
3102     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3103   }
3104 
3105   // We need the OrigLoop (scalar loop part) latch terminator to help
3106   // produce correct debug info for the middle block BB instructions.
3107   // The legality check stage guarantees that the loop will have a single
3108   // latch.
3109   assert(isa<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()) &&
3110          "Scalar loop latch terminator isn't a branch");
3111   BranchInst *ScalarLatchBr =
3112       cast<BranchInst>(OrigLoop->getLoopLatch()->getTerminator());
3113 
3114   // Add a check in the middle block to see if we have completed
3115   // all of the iterations in the first vector loop.
3116   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3117   // If tail is to be folded, we know we don't need to run the remainder.
3118   Value *CmpN = Builder.getTrue();
3119   if (!Cost->foldTailByMasking()) {
3120     CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
3121                            CountRoundDown, "cmp.n",
3122                            LoopMiddleBlock->getTerminator());
3123 
    // Here we use the same DebugLoc as the scalar loop latch branch instead
    // of the corresponding compare because they may have ended up with
    // different line numbers and we want to avoid awkward line stepping while
    // debugging, e.g. if the compare got a line number inside the loop.
3128     cast<Instruction>(CmpN)->setDebugLoc(ScalarLatchBr->getDebugLoc());
3129   }
3130 
3131   BranchInst *BrInst =
3132       BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, CmpN);
3133   BrInst->setDebugLoc(ScalarLatchBr->getDebugLoc());
3134   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3135 
3136   // Get ready to start creating new instructions into the vectorized body.
3137   assert(LoopVectorPreHeader == Lp->getLoopPreheader() &&
3138          "Inconsistent vector loop preheader");
3139   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3140 
3141   Optional<MDNode *> VectorizedLoopID =
3142       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3143                                       LLVMLoopVectorizeFollowupVectorized});
3144   if (VectorizedLoopID.hasValue()) {
3145     Lp->setLoopID(VectorizedLoopID.getValue());
3146 
3147     // Do not setAlreadyVectorized if loop attributes have been defined
3148     // explicitly.
3149     return LoopVectorPreHeader;
3150   }
3151 
3152   // Keep all loop hints from the original loop on the vector loop (we'll
3153   // replace the vectorizer-specific hints below).
3154   if (MDNode *LID = OrigLoop->getLoopID())
3155     Lp->setLoopID(LID);
3156 
3157   LoopVectorizeHints Hints(Lp, true, *ORE);
3158   Hints.setAlreadyVectorized();
3159 
3160 #ifdef EXPENSIVE_CHECKS
3161   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3162   LI->verify(*DT);
3163 #endif
3164 
3165   return LoopVectorPreHeader;
3166 }
3167 
3168 // Fix up external users of the induction variable. At this point, we are
3169 // in LCSSA form, with all external PHIs that use the IV having one input value,
3170 // coming from the remainder loop. We need those PHIs to also have a correct
3171 // value for the IV when arriving directly from the middle block.
3172 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3173                                        const InductionDescriptor &II,
3174                                        Value *CountRoundDown, Value *EndValue,
3175                                        BasicBlock *MiddleBlock) {
3176   // There are two kinds of external IV usages - those that use the value
3177   // computed in the last iteration (the PHI) and those that use the penultimate
3178   // value (the value that feeds into the phi from the loop latch).
3179   // We allow both, but they, obviously, have different values.
3180 
3181   assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3182 
3183   DenseMap<Value *, Value *> MissingVals;
3184 
3185   // An external user of the last iteration's value should see the value that
3186   // the remainder loop uses to initialize its own IV.
3187   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3188   for (User *U : PostInc->users()) {
3189     Instruction *UI = cast<Instruction>(U);
3190     if (!OrigLoop->contains(UI)) {
3191       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3192       MissingVals[UI] = EndValue;
3193     }
3194   }
3195 
  // An external user of the penultimate value needs to see EndValue - Step.
3197   // The simplest way to get this is to recompute it from the constituent SCEVs,
3198   // that is Start + (Step * (CRD - 1)).
3199   for (User *U : OrigPhi->users()) {
3200     auto *UI = cast<Instruction>(U);
3201     if (!OrigLoop->contains(UI)) {
3202       const DataLayout &DL =
3203           OrigLoop->getHeader()->getModule()->getDataLayout();
3204       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3205 
3206       IRBuilder<> B(MiddleBlock->getTerminator());
3207       Value *CountMinusOne = B.CreateSub(
3208           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3209       Value *CMO =
3210           !II.getStep()->getType()->isIntegerTy()
3211               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3212                              II.getStep()->getType())
3213               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3214       CMO->setName("cast.cmo");
3215       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3216       Escape->setName("ind.escape");
3217       MissingVals[UI] = Escape;
3218     }
3219   }
3220 
3221   for (auto &I : MissingVals) {
3222     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3224     // that is %IV2 = phi [...], [ %IV1, %latch ]
3225     // In this case, if IV1 has an external use, we need to avoid adding both
3226     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3227     // don't already have an incoming value for the middle block.
3228     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3229       PHI->addIncoming(I.second, MiddleBlock);
3230   }
3231 }
3232 
3233 namespace {
3234 
3235 struct CSEDenseMapInfo {
3236   static bool canHandle(const Instruction *I) {
3237     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3238            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3239   }
3240 
3241   static inline Instruction *getEmptyKey() {
3242     return DenseMapInfo<Instruction *>::getEmptyKey();
3243   }
3244 
3245   static inline Instruction *getTombstoneKey() {
3246     return DenseMapInfo<Instruction *>::getTombstoneKey();
3247   }
3248 
3249   static unsigned getHashValue(const Instruction *I) {
3250     assert(canHandle(I) && "Unknown instruction!");
3251     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3252                                                            I->value_op_end()));
3253   }
3254 
3255   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3256     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3257         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3258       return LHS == RHS;
3259     return LHS->isIdenticalTo(RHS);
3260   }
3261 };
3262 
3263 } // end anonymous namespace
3264 
/// Perform CSE of induction variable instructions.
3266 static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3268   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3269   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3270     Instruction *In = &*I++;
3271 
3272     if (!CSEDenseMapInfo::canHandle(In))
3273       continue;
3274 
3275     // Check if we can replace this instruction with any of the
3276     // visited instructions.
3277     if (Instruction *V = CSEMap.lookup(In)) {
3278       In->replaceAllUsesWith(V);
3279       In->eraseFromParent();
3280       continue;
3281     }
3282 
3283     CSEMap[In] = In;
3284   }
3285 }
3286 
3287 unsigned LoopVectorizationCostModel::getVectorCallCost(CallInst *CI,
3288                                                        unsigned VF,
3289                                                        bool &NeedToScalarize) {
3290   Function *F = CI->getCalledFunction();
3291   Type *ScalarRetTy = CI->getType();
3292   SmallVector<Type *, 4> Tys, ScalarTys;
3293   for (auto &ArgOp : CI->arg_operands())
3294     ScalarTys.push_back(ArgOp->getType());
3295 
3296   // Estimate cost of scalarized vector call. The source operands are assumed
3297   // to be vectors, so we need to extract individual elements from there,
3298   // execute VF scalar calls, and then gather the result into the vector return
3299   // value.
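  // Illustratively, for VF = 4 this is roughly 4 times the scalar call cost
  // plus the overhead of extracting the 4 lanes of each vector argument and
  // inserting the 4 scalar results into the vector return value.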
3300   unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys,
3301                                                  TTI::TCK_RecipThroughput);
3302   if (VF == 1)
3303     return ScalarCallCost;
3304 
3305   // Compute corresponding vector type for return value and arguments.
3306   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3307   for (Type *ScalarTy : ScalarTys)
3308     Tys.push_back(ToVectorTy(ScalarTy, VF));
3309 
3310   // Compute costs of unpacking argument values for the scalar calls and
3311   // packing the return values to a vector.
3312   unsigned ScalarizationCost = getScalarizationOverhead(CI, VF);
3313 
3314   unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
3315 
3316   // If we can't emit a vector call for this function, then the currently found
3317   // cost is the cost we need to return.
3318   NeedToScalarize = true;
3319   VFShape Shape = VFShape::get(*CI, {VF, false}, false /*HasGlobalPred*/);
3320   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3321 
3322   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3323     return Cost;
3324 
3325   // If the corresponding vector cost is cheaper, return its cost.
3326   unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys,
3327                                                  TTI::TCK_RecipThroughput);
3328   if (VectorCallCost < Cost) {
3329     NeedToScalarize = false;
3330     return VectorCallCost;
3331   }
3332   return Cost;
3333 }
3334 
3335 unsigned LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3336                                                             unsigned VF) {
3337   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3338   assert(ID && "Expected intrinsic call!");
3339 
3340   IntrinsicCostAttributes CostAttrs(ID, *CI, VF);
3341   return TTI.getIntrinsicInstrCost(CostAttrs,
3342                                    TargetTransformInfo::TCK_RecipThroughput);
3343 }
3344 
3345 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3346   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3347   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3348   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3349 }
3350 
3351 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3352   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3353   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3354   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3355 }
3356 
3357 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
3358   // For every instruction `I` in MinBWs, truncate the operands, create a
3359   // truncated version of `I` and reextend its result. InstCombine runs
3360   // later and will remove any ext/trunc pairs.
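  // For example (illustrative): an i32 add whose result is known to need only
  // 8 bits becomes a trunc to <VF x i8>, an i8 add, and a zext back to
  // <VF x i32>.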
3361   SmallPtrSet<Value *, 4> Erased;
3362   for (const auto &KV : Cost->getMinimalBitwidths()) {
3363     // If the value wasn't vectorized, we must maintain the original scalar
3364     // type. The absence of the value from VectorLoopValueMap indicates that it
3365     // wasn't vectorized.
3366     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3367       continue;
3368     for (unsigned Part = 0; Part < UF; ++Part) {
3369       Value *I = getOrCreateVectorValue(KV.first, Part);
3370       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3371         continue;
3372       Type *OriginalTy = I->getType();
3373       Type *ScalarTruncatedTy =
3374           IntegerType::get(OriginalTy->getContext(), KV.second);
3375       auto *TruncatedTy = FixedVectorType::get(
3376           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getNumElements());
3377       if (TruncatedTy == OriginalTy)
3378         continue;
3379 
3380       IRBuilder<> B(cast<Instruction>(I));
3381       auto ShrinkOperand = [&](Value *V) -> Value * {
3382         if (auto *ZI = dyn_cast<ZExtInst>(V))
3383           if (ZI->getSrcTy() == TruncatedTy)
3384             return ZI->getOperand(0);
3385         return B.CreateZExtOrTrunc(V, TruncatedTy);
3386       };
3387 
3388       // The actual instruction modification depends on the instruction type,
3389       // unfortunately.
3390       Value *NewI = nullptr;
3391       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3392         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3393                              ShrinkOperand(BO->getOperand(1)));
3394 
3395         // Any wrapping introduced by shrinking this operation shouldn't be
3396         // considered undefined behavior. So, we can't unconditionally copy
3397         // arithmetic wrapping flags to NewI.
3398         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3399       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3400         NewI =
3401             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3402                          ShrinkOperand(CI->getOperand(1)));
3403       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3404         NewI = B.CreateSelect(SI->getCondition(),
3405                               ShrinkOperand(SI->getTrueValue()),
3406                               ShrinkOperand(SI->getFalseValue()));
3407       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3408         switch (CI->getOpcode()) {
3409         default:
3410           llvm_unreachable("Unhandled cast!");
3411         case Instruction::Trunc:
3412           NewI = ShrinkOperand(CI->getOperand(0));
3413           break;
3414         case Instruction::SExt:
3415           NewI = B.CreateSExtOrTrunc(
3416               CI->getOperand(0),
3417               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3418           break;
3419         case Instruction::ZExt:
3420           NewI = B.CreateZExtOrTrunc(
3421               CI->getOperand(0),
3422               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3423           break;
3424         }
3425       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3426         auto Elements0 =
3427             cast<VectorType>(SI->getOperand(0)->getType())->getNumElements();
3428         auto *O0 = B.CreateZExtOrTrunc(
3429             SI->getOperand(0),
3430             FixedVectorType::get(ScalarTruncatedTy, Elements0));
3431         auto Elements1 =
3432             cast<VectorType>(SI->getOperand(1)->getType())->getNumElements();
3433         auto *O1 = B.CreateZExtOrTrunc(
3434             SI->getOperand(1),
3435             FixedVectorType::get(ScalarTruncatedTy, Elements1));
3436 
3437         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3438       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3439         // Don't do anything with the operands, just extend the result.
3440         continue;
3441       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3442         auto Elements =
3443             cast<VectorType>(IE->getOperand(0)->getType())->getNumElements();
3444         auto *O0 = B.CreateZExtOrTrunc(
3445             IE->getOperand(0),
3446             FixedVectorType::get(ScalarTruncatedTy, Elements));
3447         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3448         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3449       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3450         auto Elements =
3451             cast<VectorType>(EE->getOperand(0)->getType())->getNumElements();
3452         auto *O0 = B.CreateZExtOrTrunc(
3453             EE->getOperand(0),
3454             FixedVectorType::get(ScalarTruncatedTy, Elements));
3455         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3456       } else {
3457         // If we don't know what to do, be conservative and don't do anything.
3458         continue;
3459       }
3460 
3461       // Lastly, extend the result.
3462       NewI->takeName(cast<Instruction>(I));
3463       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3464       I->replaceAllUsesWith(Res);
3465       cast<Instruction>(I)->eraseFromParent();
3466       Erased.insert(I);
3467       VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
3468     }
3469   }
3470 
  // We'll have created a bunch of ZExts that are now dead. Clean them up.
3472   for (const auto &KV : Cost->getMinimalBitwidths()) {
3473     // If the value wasn't vectorized, we must maintain the original scalar
3474     // type. The absence of the value from VectorLoopValueMap indicates that it
3475     // wasn't vectorized.
3476     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3477       continue;
3478     for (unsigned Part = 0; Part < UF; ++Part) {
3479       Value *I = getOrCreateVectorValue(KV.first, Part);
3480       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3481       if (Inst && Inst->use_empty()) {
3482         Value *NewI = Inst->getOperand(0);
3483         Inst->eraseFromParent();
3484         VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3485       }
3486     }
3487   }
3488 }
3489 
3490 void InnerLoopVectorizer::fixVectorizedLoop() {
3491   // Insert truncates and extends for any truncated instructions as hints to
3492   // InstCombine.
3493   if (VF > 1)
3494     truncateToMinimalBitwidths();
3495 
3496   // Fix widened non-induction PHIs by setting up the PHI operands.
3497   if (OrigPHIsToFix.size()) {
3498     assert(EnableVPlanNativePath &&
3499            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3500     fixNonInductionPHIs();
3501   }
3502 
3503   // At this point every instruction in the original loop is widened to a
3504   // vector form. Now we need to fix the recurrences in the loop. These PHI
3505   // nodes are currently empty because we did not want to introduce cycles.
3506   // This is the second stage of vectorizing recurrences.
3507   fixCrossIterationPHIs();
3508 
3509   // Forget the original basic block.
3510   PSE.getSE()->forgetLoop(OrigLoop);
3511 
3512   // Fix-up external users of the induction variables.
3513   for (auto &Entry : Legal->getInductionVars())
3514     fixupIVUsers(Entry.first, Entry.second,
3515                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3516                  IVEndValues[Entry.first], LoopMiddleBlock);
3517 
3518   fixLCSSAPHIs();
3519   for (Instruction *PI : PredicatedInstructions)
3520     sinkScalarOperands(&*PI);
3521 
3522   // Remove redundant induction instructions.
3523   cse(LoopVectorBody);
3524 
3525   // Set/update profile weights for the vector and remainder loops as original
3526   // loop iterations are now distributed among them. Note that original loop
3527   // represented by LoopScalarBody becomes remainder loop after vectorization.
3528   //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly less accurate result, but that should be OK since
  // the profile is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
3534   setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody),
3535                                LI->getLoopFor(LoopVectorBody),
3536                                LI->getLoopFor(LoopScalarBody), VF * UF);
3537 }
3538 
3539 void InnerLoopVectorizer::fixCrossIterationPHIs() {
3540   // In order to support recurrences we need to be able to vectorize Phi nodes.
3541   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3542   // stage #2: We now need to fix the recurrences by adding incoming edges to
3543   // the currently empty PHI nodes. At this point every instruction in the
3544   // original loop is widened to a vector form so we can use them to construct
3545   // the incoming edges.
3546   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
3547     // Handle first-order recurrences and reductions that need to be fixed.
3548     if (Legal->isFirstOrderRecurrence(&Phi))
3549       fixFirstOrderRecurrence(&Phi);
3550     else if (Legal->isReductionVariable(&Phi))
3551       fixReduction(&Phi);
3552   }
3553 }
3554 
3555 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3556   // This is the second phase of vectorizing first-order recurrences. An
3557   // overview of the transformation is described below. Suppose we have the
3558   // following loop.
3559   //
3560   //   for (int i = 0; i < n; ++i)
3561   //     b[i] = a[i] - a[i - 1];
3562   //
3563   // There is a first-order recurrence on "a". For this loop, the shorthand
3564   // scalar IR looks like:
3565   //
3566   //   scalar.ph:
3567   //     s_init = a[-1]
3568   //     br scalar.body
3569   //
3570   //   scalar.body:
3571   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3572   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3573   //     s2 = a[i]
3574   //     b[i] = s2 - s1
3575   //     br cond, scalar.body, ...
3576   //
  // In this example, s1 is a recurrence because its value depends on the
3578   // previous iteration. In the first phase of vectorization, we created a
3579   // temporary value for s1. We now complete the vectorization and produce the
3580   // shorthand vector IR shown below (for VF = 4, UF = 1).
3581   //
3582   //   vector.ph:
3583   //     v_init = vector(..., ..., ..., a[-1])
3584   //     br vector.body
3585   //
3586   //   vector.body
3587   //     i = phi [0, vector.ph], [i+4, vector.body]
3588   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3589   //     v2 = a[i, i+1, i+2, i+3];
3590   //     v3 = vector(v1(3), v2(0, 1, 2))
3591   //     b[i, i+1, i+2, i+3] = v2 - v3
3592   //     br cond, vector.body, middle.block
3593   //
3594   //   middle.block:
3595   //     x = v2(3)
3596   //     br scalar.ph
3597   //
3598   //   scalar.ph:
3599   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3600   //     br scalar.body
3601   //
  // After the vector loop completes execution, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.
3604 
3605   // Get the original loop preheader and single loop latch.
3606   auto *Preheader = OrigLoop->getLoopPreheader();
3607   auto *Latch = OrigLoop->getLoopLatch();
3608 
3609   // Get the initial and previous values of the scalar recurrence.
3610   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3611   auto *Previous = Phi->getIncomingValueForBlock(Latch);
3612 
3613   // Create a vector from the initial value.
3614   auto *VectorInit = ScalarInit;
3615   if (VF > 1) {
3616     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3617     VectorInit = Builder.CreateInsertElement(
3618         UndefValue::get(FixedVectorType::get(VectorInit->getType(), VF)),
3619         VectorInit, Builder.getInt32(VF - 1), "vector.recur.init");
3620   }
3621 
3622   // We constructed a temporary phi node in the first phase of vectorization.
3623   // This phi node will eventually be deleted.
3624   Builder.SetInsertPoint(
3625       cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
3626 
3627   // Create a phi node for the new recurrence. The current value will either be
3628   // the initial value inserted into a vector or loop-varying vector value.
3629   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
3630   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
3631 
3632   // Get the vectorized previous value of the last part UF - 1. It appears last
3633   // among all unrolled iterations, due to the order of their construction.
3634   Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
3635 
3636   // Find and set the insertion point after the previous value if it is an
3637   // instruction.
3638   BasicBlock::iterator InsertPt;
3639   // Note that the previous value may have been constant-folded so it is not
3640   // guaranteed to be an instruction in the vector loop.
3641   // FIXME: Loop invariant values do not form recurrences. We should deal with
3642   //        them earlier.
3643   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
3644     InsertPt = LoopVectorBody->getFirstInsertionPt();
3645   else {
3646     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
3647     if (isa<PHINode>(PreviousLastPart))
3648       // If the previous value is a phi node, we should insert after all the phi
3649       // nodes in the block containing the PHI to avoid breaking basic block
3650       // verification. Note that the basic block may be different from
3651       // LoopVectorBody, in case we predicate the loop.
3652       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
3653     else
3654       InsertPt = ++PreviousInst->getIterator();
3655   }
3656   Builder.SetInsertPoint(&*InsertPt);
3657 
3658   // We will construct a vector for the recurrence by combining the values for
3659   // the current and previous iterations. This is the required shuffle mask.
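  // For example, with VF = 4 the mask is <3, 4, 5, 6>: the last element of the
  // previous vector followed by the first three elements of the current one.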
3660   SmallVector<int, 8> ShuffleMask(VF);
3661   ShuffleMask[0] = VF - 1;
3662   for (unsigned I = 1; I < VF; ++I)
3663     ShuffleMask[I] = I + VF - 1;
3664 
3665   // The vector from which to take the initial value for the current iteration
3666   // (actual or unrolled). Initially, this is the vector phi node.
3667   Value *Incoming = VecPhi;
3668 
3669   // Shuffle the current and previous vector and update the vector parts.
3670   for (unsigned Part = 0; Part < UF; ++Part) {
3671     Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
3672     Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
3673     auto *Shuffle = VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart,
3674                                                          ShuffleMask)
3675                            : Incoming;
3676     PhiPart->replaceAllUsesWith(Shuffle);
3677     cast<Instruction>(PhiPart)->eraseFromParent();
3678     VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
3679     Incoming = PreviousPart;
3680   }
3681 
3682   // Fix the latch value of the new recurrence in the vector loop.
3683   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3684 
3685   // Extract the last vector element in the middle block. This will be the
3686   // initial value for the recurrence when jumping to the scalar loop.
3687   auto *ExtractForScalar = Incoming;
3688   if (VF > 1) {
3689     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3690     ExtractForScalar = Builder.CreateExtractElement(
3691         ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
3692   }
3693   // Extract the second-to-last element in the middle block if the
3694   // Phi is used outside the loop. We need to extract the phi itself
3695   // and not the last element (the phi update in the current iteration). This
3696   // will be the value when jumping to the exit block from the LoopMiddleBlock,
3697   // when the scalar loop is not run at all.
3698   Value *ExtractForPhiUsedOutsideLoop = nullptr;
3699   if (VF > 1)
3700     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3701         Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
3702   // When the loop is unrolled without vectorizing, initialize
3703   // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
3704   // value of `Incoming`. This is analogous to the vectorized case above:
3705   // extracting the second-to-last element when VF > 1.
3706   else if (UF > 1)
3707     ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
3708 
3709   // Fix the initial value of the original recurrence in the scalar loop.
3710   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3711   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3712   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3713     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3714     Start->addIncoming(Incoming, BB);
3715   }
3716 
3717   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3718   Phi->setName("scalar.recur");
3719 
3720   // Finally, fix users of the recurrence outside the loop. The users will need
3721   // either the last value of the scalar recurrence or the last value of the
3722   // vector recurrence we extracted in the middle block. Since the loop is in
3723   // LCSSA form, we just need to find all the phi nodes for the original scalar
3724   // recurrence in the exit block, and then add an edge for the middle block.
3725   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3726     if (LCSSAPhi.getIncomingValue(0) == Phi) {
3727       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3728     }
3729   }
3730 }
3731 
3732 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
3733   Constant *Zero = Builder.getInt32(0);
3734 
3735   // Get its reduction variable descriptor.
3736   assert(Legal->isReductionVariable(Phi) &&
3737          "Unable to find the reduction variable");
3738   RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
3739 
3740   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3741   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3742   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3743   RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3744     RdxDesc.getMinMaxRecurrenceKind();
3745   setDebugLocFromInst(Builder, ReductionStartValue);
3746 
3747   // We need to generate a reduction vector from the incoming scalar.
3748   // To do so, we need to generate the 'identity' vector and override
3749   // one of the elements with the incoming scalar reduction. We need
3750   // to do it in the vector-loop preheader.
3751   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3752 
3753   // This is the vector-clone of the value that leaves the loop.
3754   Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
3755 
3756   // Find the reduction identity variable. Zero for addition, or and xor;
3757   // one for multiplication; -1 for and.
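  // For example, an integer add reduction with start value S and VF = 4 uses
  // the identity vector <0, 0, 0, 0> and the start vector <S, 0, 0, 0>.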
3758   Value *Identity;
3759   Value *VectorStart;
3760   if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3761       RK == RecurrenceDescriptor::RK_FloatMinMax) {
3762     // MinMax reductions have the start value as their identity.
3763     if (VF == 1) {
3764       VectorStart = Identity = ReductionStartValue;
3765     } else {
3766       VectorStart = Identity =
3767         Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3768     }
3769   } else {
3770     // Handle other reduction kinds:
3771     Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3772         RK, VecTy->getScalarType());
3773     if (VF == 1) {
3774       Identity = Iden;
3775       // With VF == 1 there is no vector to build; the incoming scalar
3776       // reduction start value is used directly.
3777       VectorStart = ReductionStartValue;
3778     } else {
3779       Identity = ConstantVector::getSplat({VF, false}, Iden);
3780 
3781       // This vector is the Identity vector where the first element is the
3782       // incoming scalar reduction.
3783       VectorStart =
3784         Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3785     }
3786   }
3787 
3788   // Wrap flags are in general invalid after vectorization, clear them.
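  // For example, the partial sums in individual vector lanes may overflow even
  // when the final scalar sum does not, so nsw/nuw can no longer be assumed.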
3789   clearReductionWrapFlags(RdxDesc);
3790 
3791   // Fix the vector-loop phi.
3792 
3793   // Reductions do not have to start at zero. They can start with
3794   // any loop invariant values.
3795   BasicBlock *Latch = OrigLoop->getLoopLatch();
3796   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3797 
3798   for (unsigned Part = 0; Part < UF; ++Part) {
3799     Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
3800     Value *Val = getOrCreateVectorValue(LoopVal, Part);
3801     // Make sure to add the reduction start value only to the
3802     // first unroll part.
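    // For example, for an add reduction with UF = 2, part 0 starts from
    // <S, 0, 0, 0> and part 1 from <0, 0, 0, 0>, so the start value S is
    // counted exactly once.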
3803     Value *StartVal = (Part == 0) ? VectorStart : Identity;
3804     cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
3805     cast<PHINode>(VecRdxPhi)
3806       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3807   }
3808 
3809   // Before each round, move the insertion point right between
3810   // the PHIs and the values we are going to write.
3811   // This allows us to write both PHINodes and the extractelement
3812   // instructions.
3813   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3814 
3815   setDebugLocFromInst(Builder, LoopExitInst);
3816 
3817   // If the tail is folded by masking, the vector value to leave the loop should be
3818   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
3819   // instead of the former.
3820   if (Cost->foldTailByMasking()) {
3821     for (unsigned Part = 0; Part < UF; ++Part) {
3822       Value *VecLoopExitInst =
3823           VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3824       Value *Sel = nullptr;
3825       for (User *U : VecLoopExitInst->users()) {
3826         if (isa<SelectInst>(U)) {
3827           assert(!Sel && "Reduction exit feeding two selects");
3828           Sel = U;
3829         } else
3830           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
3831       }
3832       assert(Sel && "Reduction exit feeds no select");
3833       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel);
3834     }
3835   }
3836 
3837   // If the vector reduction can be performed in a smaller type, we truncate
3838   // then extend the loop exit value to enable InstCombine to evaluate the
3839   // entire expression in the smaller type.
3840   if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
3841     Type *RdxVecTy = FixedVectorType::get(RdxDesc.getRecurrenceType(), VF);
3842     Builder.SetInsertPoint(
3843         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
3844     VectorParts RdxParts(UF);
3845     for (unsigned Part = 0; Part < UF; ++Part) {
3846       RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3847       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3848       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3849                                         : Builder.CreateZExt(Trunc, VecTy);
3850       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
3851            UI != RdxParts[Part]->user_end();)
3852         if (*UI != Trunc) {
3853           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
3854           RdxParts[Part] = Extnd;
3855         } else {
3856           ++UI;
3857         }
3858     }
3859     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3860     for (unsigned Part = 0; Part < UF; ++Part) {
3861       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3862       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
3863     }
3864   }
3865 
3866   // Reduce all of the unrolled parts into a single vector.
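  // For example, with UF = 2 and an add reduction, this produces
  // bin.rdx = part1 + part0 before the final horizontal reduction below.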
3867   Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
3868   unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
3869 
3870   // The middle block terminator has already been assigned a DebugLoc here (the
3871   // OrigLoop's single latch terminator). We want the whole middle block to
3872   // appear to execute on this line because: (a) it is all compiler generated,
3873   // (b) these instructions are always executed after evaluating the latch
3874   // conditional branch, and (c) other passes may add new predecessors which
3875   // terminate on this line. This is the easiest way to ensure we don't
3876   // accidentally cause an extra step back into the loop while debugging.
3877   setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
3878   for (unsigned Part = 1; Part < UF; ++Part) {
3879     Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3880     if (Op != Instruction::ICmp && Op != Instruction::FCmp)
3881       // Floating point operations had to be 'fast' to enable the reduction.
3882       ReducedPartRdx = addFastMathFlag(
3883           Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart,
3884                               ReducedPartRdx, "bin.rdx"),
3885           RdxDesc.getFastMathFlags());
3886     else
3887       ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx,
3888                                       RdxPart);
3889   }
3890 
3891   if (VF > 1) {
3892     bool NoNaN = Legal->hasFunNoNaNAttr();
3893     ReducedPartRdx =
3894         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN);
3895     // If the reduction can be performed in a smaller type, we need to extend
3896     // the reduction to the wider type before we branch to the original loop.
3897     if (Phi->getType() != RdxDesc.getRecurrenceType())
3898       ReducedPartRdx =
3899         RdxDesc.isSigned()
3900         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
3901         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
3902   }
3903 
3904   // Create a phi node that merges control-flow from the backedge-taken check
3905   // block and the middle block.
3906   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
3907                                         LoopScalarPreHeader->getTerminator());
3908   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
3909     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
3910   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
3911 
3912   // Now, we need to fix the users of the reduction variable
3913   // inside and outside of the scalar remainder loop.
3914   // We know that the loop is in LCSSA form. We need to update the
3915   // PHI nodes in the exit blocks.
3916   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3917     // All PHINodes need to have a single entry edge, or two if
3918     // we already fixed them.
3919     assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
3920 
3921     // We found a reduction value exit-PHI. Update it with the
3922     // incoming bypass edge.
3923     if (LCSSAPhi.getIncomingValue(0) == LoopExitInst)
3924       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
3925   } // end of the LCSSA phi scan.
3926 
3927   // Fix the scalar loop reduction variable with the incoming reduction sum
3928   // from the vector body and from the backedge value.
3929   int IncomingEdgeBlockIdx =
3930     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
3931   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
3932   // Pick the other block.
3933   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
3934   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
3935   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
3936 }
3937 
3938 void InnerLoopVectorizer::clearReductionWrapFlags(
3939     RecurrenceDescriptor &RdxDesc) {
3940   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3941   if (RK != RecurrenceDescriptor::RK_IntegerAdd &&
3942       RK != RecurrenceDescriptor::RK_IntegerMult)
3943     return;
3944 
3945   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
3946   assert(LoopExitInstr && "null loop exit instruction");
3947   SmallVector<Instruction *, 8> Worklist;
3948   SmallPtrSet<Instruction *, 8> Visited;
3949   Worklist.push_back(LoopExitInstr);
3950   Visited.insert(LoopExitInstr);
3951 
3952   while (!Worklist.empty()) {
3953     Instruction *Cur = Worklist.pop_back_val();
3954     if (isa<OverflowingBinaryOperator>(Cur))
3955       for (unsigned Part = 0; Part < UF; ++Part) {
3956         Value *V = getOrCreateVectorValue(Cur, Part);
3957         cast<Instruction>(V)->dropPoisonGeneratingFlags();
3958       }
3959 
3960     for (User *U : Cur->users()) {
3961       Instruction *UI = cast<Instruction>(U);
3962       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
3963           Visited.insert(UI).second)
3964         Worklist.push_back(UI);
3965     }
3966   }
3967 }
3968 
3969 void InnerLoopVectorizer::fixLCSSAPHIs() {
3970   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3971     if (LCSSAPhi.getNumIncomingValues() == 1) {
3972       auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
3973       // Non-instruction incoming values have a single value, so use lane zero.
3974       unsigned LastLane = 0;
3975       if (isa<Instruction>(IncomingValue))
3976           LastLane = Cost->isUniformAfterVectorization(
3977                          cast<Instruction>(IncomingValue), VF)
3978                          ? 0
3979                          : VF - 1;
3980       // Can be a loop invariant incoming value or the last scalar value to be
3981       // extracted from the vectorized loop.
3982       Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3983       Value *lastIncomingValue =
3984           getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane });
3985       LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
3986     }
3987   }
3988 }
3989 
3990 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
3991   // The basic block and loop containing the predicated instruction.
3992   auto *PredBB = PredInst->getParent();
3993   auto *VectorLoop = LI->getLoopFor(PredBB);
3994 
3995   // Initialize a worklist with the operands of the predicated instruction.
3996   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
3997 
3998   // Holds instructions that we need to analyze again. An instruction may be
3999   // reanalyzed if we don't yet know if we can sink it or not.
4000   SmallVector<Instruction *, 8> InstsToReanalyze;
4001 
4002   // Returns true if a given use occurs in the predicated block. Phi nodes use
4003   // their operands in their corresponding predecessor blocks.
4004   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4005     auto *I = cast<Instruction>(U.getUser());
4006     BasicBlock *BB = I->getParent();
4007     if (auto *Phi = dyn_cast<PHINode>(I))
4008       BB = Phi->getIncomingBlock(
4009           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4010     return BB == PredBB;
4011   };
4012 
4013   // Iteratively sink the scalarized operands of the predicated instruction
4014   // into the block we created for it. When an instruction is sunk, its
4015   // operands are added to the worklist. The algorithm ends when a pass
4016   // through the worklist does not sink a single instruction.
4017   bool Changed;
4018   do {
4019     // Add the instructions that need to be reanalyzed to the worklist, and
4020     // reset the changed indicator.
4021     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4022     InstsToReanalyze.clear();
4023     Changed = false;
4024 
4025     while (!Worklist.empty()) {
4026       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4027 
4028       // We can't sink an instruction if it is a phi node, is already in the
4029       // predicated block, is not in the loop, or may have side effects.
4030       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4031           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4032         continue;
4033 
4034       // It's legal to sink the instruction if all its uses occur in the
4035       // predicated block. Otherwise, there's nothing to do yet, and we may
4036       // need to reanalyze the instruction.
4037       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4038         InstsToReanalyze.push_back(I);
4039         continue;
4040       }
4041 
4042       // Move the instruction to the beginning of the predicated block, and add
4043       // its operands to the worklist.
4044       I->moveBefore(&*PredBB->getFirstInsertionPt());
4045       Worklist.insert(I->op_begin(), I->op_end());
4046 
4047       // The sinking may have enabled other instructions to be sunk, so we will
4048       // need to iterate.
4049       Changed = true;
4050     }
4051   } while (Changed);
4052 }
4053 
4054 void InnerLoopVectorizer::fixNonInductionPHIs() {
4055   for (PHINode *OrigPhi : OrigPHIsToFix) {
4056     PHINode *NewPhi =
4057         cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0));
4058     unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();
4059 
4060     SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
4061         predecessors(OrigPhi->getParent()));
4062     SmallVector<BasicBlock *, 2> VectorBBPredecessors(
4063         predecessors(NewPhi->getParent()));
4064     assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
4065            "Scalar and Vector BB should have the same number of predecessors");
4066 
4067     // The insertion point in Builder may be invalidated by the time we get
4068     // here. Force the Builder insertion point to something valid so that we do
4069     // not run into issues during insertion point restore in
4070     // getOrCreateVectorValue calls below.
4071     Builder.SetInsertPoint(NewPhi);
4072 
4073     // The predecessor order is preserved and we can rely on mapping between
4074     // scalar and vector block predecessors.
4075     for (unsigned i = 0; i < NumIncomingValues; ++i) {
4076       BasicBlock *NewPredBB = VectorBBPredecessors[i];
4077 
4078       // When looking up the new scalar/vector values to fix up, use incoming
4079       // values from original phi.
4080       Value *ScIncV =
4081           OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]);
4082 
4083       // A scalar incoming value may need a broadcast.
4084       Value *NewIncV = getOrCreateVectorValue(ScIncV, 0);
4085       NewPhi->addIncoming(NewIncV, NewPredBB);
4086     }
4087   }
4088 }
4089 
4090 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPUser &Operands,
4091                                    unsigned UF, unsigned VF,
4092                                    bool IsPtrLoopInvariant,
4093                                    SmallBitVector &IsIndexLoopInvariant,
4094                                    VPTransformState &State) {
4095   // Construct a vector GEP by widening the operands of the scalar GEP as
4096   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4097   // results in a vector of pointers when at least one operand of the GEP
4098   // is vector-typed. Thus, to keep the representation compact, we only use
4099   // vector-typed operands for loop-varying values.
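  // For example, for VF = 4 a GEP with one loop-varying index may be widened to
  // something like (illustrative operand names):
  //   %VectorGep = getelementptr inbounds i32, i32* %base, <4 x i64> %vec.ind
  // i.e. a vector of four pointers.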
4100 
4101   if (VF > 1 && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4102     // If we are vectorizing, but the GEP has only loop-invariant operands,
4103     // the GEP we build (by only using vector-typed operands for
4104     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4105     // produce a vector of pointers, we need to either arbitrarily pick an
4106     // operand to broadcast, or broadcast a clone of the original GEP.
4107     // Here, we broadcast a clone of the original.
4108     //
4109     // TODO: If at some point we decide to scalarize instructions having
4110     //       loop-invariant operands, this special case will no longer be
4111     //       required. We would add the scalarization decision to
4112     //       collectLoopScalars() and teach getVectorValue() to broadcast
4113     //       the lane-zero scalar value.
4114     auto *Clone = Builder.Insert(GEP->clone());
4115     for (unsigned Part = 0; Part < UF; ++Part) {
4116       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4117       VectorLoopValueMap.setVectorValue(GEP, Part, EntryPart);
4118       addMetadata(EntryPart, GEP);
4119     }
4120   } else {
4121     // If the GEP has at least one loop-varying operand, we are sure to
4122     // produce a vector of pointers. But if we are only unrolling, we want
4123     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4124     // produce with the code below will be scalar (if VF == 1) or vector
4125     // (otherwise). Note that for the unroll-only case, we still maintain
4126     // values in the vector mapping with initVector, as we do for other
4127     // instructions.
4128     for (unsigned Part = 0; Part < UF; ++Part) {
4129       // The pointer operand of the new GEP. If it's loop-invariant, we
4130       // won't broadcast it.
4131       auto *Ptr = IsPtrLoopInvariant ? State.get(Operands.getOperand(0), {0, 0})
4132                                      : State.get(Operands.getOperand(0), Part);
4133 
4134       // Collect all the indices for the new GEP. If any index is
4135       // loop-invariant, we won't broadcast it.
4136       SmallVector<Value *, 4> Indices;
4137       for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4138         VPValue *Operand = Operands.getOperand(I);
4139         if (IsIndexLoopInvariant[I - 1])
4140           Indices.push_back(State.get(Operand, {0, 0}));
4141         else
4142           Indices.push_back(State.get(Operand, Part));
4143       }
4144 
4145       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4146       // but it should be a vector, otherwise.
4147       auto *NewGEP =
4148           GEP->isInBounds()
4149               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4150                                           Indices)
4151               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4152       assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
4153              "NewGEP is not a pointer vector");
4154       VectorLoopValueMap.setVectorValue(GEP, Part, NewGEP);
4155       addMetadata(NewGEP, GEP);
4156     }
4157   }
4158 }
4159 
4160 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
4161                                               unsigned VF) {
4162   PHINode *P = cast<PHINode>(PN);
4163   if (EnableVPlanNativePath) {
4164     // Currently we enter here in the VPlan-native path for non-induction
4165     // PHIs where all control flow is uniform. We simply widen these PHIs.
4166     // Create a vector phi with no operands - the vector phi operands will be
4167     // set at the end of vector code generation.
4168     Type *VecTy =
4169         (VF == 1) ? PN->getType() : FixedVectorType::get(PN->getType(), VF);
4170     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4171     VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
4172     OrigPHIsToFix.push_back(P);
4173 
4174     return;
4175   }
4176 
4177   assert(PN->getParent() == OrigLoop->getHeader() &&
4178          "Non-header phis should have been handled elsewhere");
4179 
4180   // In order to support recurrences we need to be able to vectorize Phi nodes.
4181   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4182   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4183   // this value when we vectorize all of the instructions that use the PHI.
4184   if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4185     for (unsigned Part = 0; Part < UF; ++Part) {
4186       // This is phase one of vectorizing PHIs.
4187       Type *VecTy =
4188           (VF == 1) ? PN->getType() : FixedVectorType::get(PN->getType(), VF);
4189       Value *EntryPart = PHINode::Create(
4190           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4191       VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
4192     }
4193     return;
4194   }
4195 
4196   setDebugLocFromInst(Builder, P);
4197 
4198   // This PHINode must be an induction variable.
4199   // Make sure that we know about it.
4200   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4201 
4202   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4203   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4204 
4205   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4206   // which can be found from the original scalar operations.
4207   switch (II.getKind()) {
4208   case InductionDescriptor::IK_NoInduction:
4209     llvm_unreachable("Unknown induction");
4210   case InductionDescriptor::IK_IntInduction:
4211   case InductionDescriptor::IK_FpInduction:
4212     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4213   case InductionDescriptor::IK_PtrInduction: {
4214     // Handle the pointer induction variable case.
4215     assert(P->getType()->isPointerTy() && "Unexpected type.");
4216 
4217     if (Cost->isScalarAfterVectorization(P, VF)) {
4218       // This is the normalized GEP that starts counting at zero.
4219       Value *PtrInd =
4220           Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
4221       // Determine the number of scalars we need to generate for each unroll
4222       // iteration. If the instruction is uniform, we only need to generate the
4223       // first lane. Otherwise, we generate all VF values.
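      // For example, with VF = 4 and UF = 2, a non-uniform pointer induction
      // produces 8 scalar "next.gep" values, one per lane and unroll part.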
4224       unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
4225       for (unsigned Part = 0; Part < UF; ++Part) {
4226         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4227           Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
4228           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4229           Value *SclrGep =
4230               emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4231           SclrGep->setName("next.gep");
4232           VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
4233         }
4234       }
4235       return;
4236     }
4237     assert(isa<SCEVConstant>(II.getStep()) &&
4238            "Induction step not a SCEV constant!");
4239     Type *PhiType = II.getStep()->getType();
4240 
4241     // Build a pointer phi
4242     Value *ScalarStartValue = II.getStartValue();
4243     Type *ScStValueType = ScalarStartValue->getType();
4244     PHINode *NewPointerPhi =
4245         PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
4246     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4247 
4248     // A pointer induction, performed by using a gep
4249     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4250     Instruction *InductionLoc = LoopLatch->getTerminator();
4251     const SCEV *ScalarStep = II.getStep();
4252     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4253     Value *ScalarStepValue =
4254         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4255     Value *InductionGEP = GetElementPtrInst::Create(
4256         ScStValueType->getPointerElementType(), NewPointerPhi,
4257         Builder.CreateMul(ScalarStepValue, ConstantInt::get(PhiType, VF * UF)),
4258         "ptr.ind", InductionLoc);
4259     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4260 
4261     // Create UF many actual address geps that use the pointer
4262     // phi as base and a vectorized version of the step value
4263     // (<step*0, ..., step*N>) as offset.
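    // For example, with VF = 4, Part 1 uses the offsets <4, 5, 6, 7> multiplied
    // by the splatted step value.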
4264     for (unsigned Part = 0; Part < UF; ++Part) {
4265       SmallVector<Constant *, 8> Indices;
4266       // Create a vector of VF consecutive numbers starting at Part * VF.
4267       for (unsigned i = 0; i < VF; ++i)
4268         Indices.push_back(ConstantInt::get(PhiType, i + Part * VF));
4269       Constant *StartOffset = ConstantVector::get(Indices);
4270 
4271       Value *GEP = Builder.CreateGEP(
4272           ScStValueType->getPointerElementType(), NewPointerPhi,
4273           Builder.CreateMul(StartOffset,
4274                             Builder.CreateVectorSplat(VF, ScalarStepValue),
4275                             "vector.gep"));
4276       VectorLoopValueMap.setVectorValue(P, Part, GEP);
4277     }
4278   }
4279   }
4280 }
4281 
4282 /// A helper function for checking whether an integer division-related
4283 /// instruction may divide by zero (in which case it must be predicated if
4284 /// executed conditionally in the scalar code).
4285 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4286 /// Non-zero divisors that are not compile-time constants will not be
4287 /// converted into multiplication, so we will still end up scalarizing
4288 /// the division, but can do so w/o predication.
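/// For example, 'a[i] / 7' has a known non-zero constant divisor and need not
/// be predicated for this reason, while 'a[i] / b[i]' may divide by zero and
/// must be predicated if executed conditionally.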
4289 static bool mayDivideByZero(Instruction &I) {
4290   assert((I.getOpcode() == Instruction::UDiv ||
4291           I.getOpcode() == Instruction::SDiv ||
4292           I.getOpcode() == Instruction::URem ||
4293           I.getOpcode() == Instruction::SRem) &&
4294          "Unexpected instruction");
4295   Value *Divisor = I.getOperand(1);
4296   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4297   return !CInt || CInt->isZero();
4298 }
4299 
4300 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPUser &User,
4301                                            VPTransformState &State) {
4302   switch (I.getOpcode()) {
4303   case Instruction::Call:
4304   case Instruction::Br:
4305   case Instruction::PHI:
4306   case Instruction::GetElementPtr:
4307   case Instruction::Select:
4308     llvm_unreachable("This instruction is handled by a different recipe.");
4309   case Instruction::UDiv:
4310   case Instruction::SDiv:
4311   case Instruction::SRem:
4312   case Instruction::URem:
4313   case Instruction::Add:
4314   case Instruction::FAdd:
4315   case Instruction::Sub:
4316   case Instruction::FSub:
4317   case Instruction::FNeg:
4318   case Instruction::Mul:
4319   case Instruction::FMul:
4320   case Instruction::FDiv:
4321   case Instruction::FRem:
4322   case Instruction::Shl:
4323   case Instruction::LShr:
4324   case Instruction::AShr:
4325   case Instruction::And:
4326   case Instruction::Or:
4327   case Instruction::Xor: {
4328     // Just widen unops and binops.
4329     setDebugLocFromInst(Builder, &I);
4330 
4331     for (unsigned Part = 0; Part < UF; ++Part) {
4332       SmallVector<Value *, 2> Ops;
4333       for (VPValue *VPOp : User.operands())
4334         Ops.push_back(State.get(VPOp, Part));
4335 
4336       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4337 
4338       if (auto *VecOp = dyn_cast<Instruction>(V))
4339         VecOp->copyIRFlags(&I);
4340 
4341       // Use this vector value for all users of the original instruction.
4342       VectorLoopValueMap.setVectorValue(&I, Part, V);
4343       addMetadata(V, &I);
4344     }
4345 
4346     break;
4347   }
4348   case Instruction::ICmp:
4349   case Instruction::FCmp: {
4350     // Widen compares. Generate vector compares.
4351     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4352     auto *Cmp = cast<CmpInst>(&I);
4353     setDebugLocFromInst(Builder, Cmp);
4354     for (unsigned Part = 0; Part < UF; ++Part) {
4355       Value *A = State.get(User.getOperand(0), Part);
4356       Value *B = State.get(User.getOperand(1), Part);
4357       Value *C = nullptr;
4358       if (FCmp) {
4359         // Propagate fast math flags.
4360         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4361         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4362         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4363       } else {
4364         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4365       }
4366       VectorLoopValueMap.setVectorValue(&I, Part, C);
4367       addMetadata(C, &I);
4368     }
4369 
4370     break;
4371   }
4372 
4373   case Instruction::ZExt:
4374   case Instruction::SExt:
4375   case Instruction::FPToUI:
4376   case Instruction::FPToSI:
4377   case Instruction::FPExt:
4378   case Instruction::PtrToInt:
4379   case Instruction::IntToPtr:
4380   case Instruction::SIToFP:
4381   case Instruction::UIToFP:
4382   case Instruction::Trunc:
4383   case Instruction::FPTrunc:
4384   case Instruction::BitCast: {
4385     auto *CI = cast<CastInst>(&I);
4386     setDebugLocFromInst(Builder, CI);
4387 
4388     /// Vectorize casts.
4389     Type *DestTy =
4390         (VF == 1) ? CI->getType() : FixedVectorType::get(CI->getType(), VF);
4391 
4392     for (unsigned Part = 0; Part < UF; ++Part) {
4393       Value *A = State.get(User.getOperand(0), Part);
4394       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4395       VectorLoopValueMap.setVectorValue(&I, Part, Cast);
4396       addMetadata(Cast, &I);
4397     }
4398     break;
4399   }
4400   default:
4401     // This instruction is not vectorized by simple widening.
4402     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4403     llvm_unreachable("Unhandled instruction!");
4404   } // end of switch.
4405 }
4406 
4407 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPUser &ArgOperands,
4408                                                VPTransformState &State) {
4409   assert(!isa<DbgInfoIntrinsic>(I) &&
4410          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4411   setDebugLocFromInst(Builder, &I);
4412 
4413   Module *M = I.getParent()->getParent()->getParent();
4414   auto *CI = cast<CallInst>(&I);
4415 
4416   SmallVector<Type *, 4> Tys;
4417   for (Value *ArgOperand : CI->arg_operands())
4418     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
4419 
4420   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4421 
4422   // The flag shows whether we use an intrinsic or a regular call for the
4423   // vectorized version of the instruction.
4424   // Is it beneficial to perform the intrinsic call compared to the lib call?
4425   bool NeedToScalarize = false;
4426   unsigned CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4427   bool UseVectorIntrinsic =
4428       ID && Cost->getVectorIntrinsicCost(CI, VF) <= CallCost;
4429   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4430          "Instruction should be scalarized elsewhere.");
4431 
4432   for (unsigned Part = 0; Part < UF; ++Part) {
4433     SmallVector<Value *, 4> Args;
4434     for (auto &I : enumerate(ArgOperands.operands())) {
4435       // Some intrinsics have a scalar argument - don't replace it with a
4436       // vector.
4437       Value *Arg;
4438       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4439         Arg = State.get(I.value(), Part);
4440       else
4441         Arg = State.get(I.value(), {0, 0});
4442       Args.push_back(Arg);
4443     }
4444 
4445     Function *VectorF;
4446     if (UseVectorIntrinsic) {
4447       // Use vector version of the intrinsic.
4448       Type *TysForDecl[] = {CI->getType()};
4449       if (VF > 1)
4450         TysForDecl[0] =
4451             FixedVectorType::get(CI->getType()->getScalarType(), VF);
4452       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4453       assert(VectorF && "Can't retrieve vector intrinsic.");
4454     } else {
4455       // Use vector version of the function call.
4456       const VFShape Shape =
4457           VFShape::get(*CI, {VF, false} /*EC*/, false /*HasGlobalPred*/);
4458 #ifndef NDEBUG
4459       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4460              "Can't create vector function.");
4461 #endif
4462       VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
4463     }
4464     SmallVector<OperandBundleDef, 1> OpBundles;
4465     CI->getOperandBundlesAsDefs(OpBundles);
4466     CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4467 
4468     if (isa<FPMathOperator>(V))
4469       V->copyFastMathFlags(CI);
4470 
4471     VectorLoopValueMap.setVectorValue(&I, Part, V);
4472     addMetadata(V, &I);
4473   }
4474 }
4475 
4476 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I,
4477                                                  VPUser &Operands,
4478                                                  bool InvariantCond,
4479                                                  VPTransformState &State) {
4480   setDebugLocFromInst(Builder, &I);
4481 
4482   // The condition can be loop invariant but still defined inside the
4483   // loop. This means that we can't just use the original 'cond' value.
4484   // We have to take the 'vectorized' value and pick the first lane.
4485   // Instcombine will make this a no-op.
4486   auto *InvarCond =
4487       InvariantCond ? State.get(Operands.getOperand(0), {0, 0}) : nullptr;
4488 
4489   for (unsigned Part = 0; Part < UF; ++Part) {
4490     Value *Cond =
4491         InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
4492     Value *Op0 = State.get(Operands.getOperand(1), Part);
4493     Value *Op1 = State.get(Operands.getOperand(2), Part);
4494     Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
4495     VectorLoopValueMap.setVectorValue(&I, Part, Sel);
4496     addMetadata(Sel, &I);
4497   }
4498 }
4499 
4500 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
4501   // We should not collect Scalars more than once per VF. Right now, this
4502   // function is called from collectUniformsAndScalars(), which already does
4503   // this check. Collecting Scalars for VF=1 does not make any sense.
4504   assert(VF >= 2 && Scalars.find(VF) == Scalars.end() &&
4505          "This function should not be visited twice for the same VF");
4506 
4507   SmallSetVector<Instruction *, 8> Worklist;
4508 
4509   // These sets are used to seed the analysis with pointers used by memory
4510   // accesses that will remain scalar.
4511   SmallSetVector<Instruction *, 8> ScalarPtrs;
4512   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4513   auto *Latch = TheLoop->getLoopLatch();
4514 
4515   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4516   // The pointer operands of loads and stores will be scalar as long as the
4517   // memory access is not a gather or scatter operation. The value operand of a
4518   // store will remain scalar if the store is scalarized.
4519   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4520     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4521     assert(WideningDecision != CM_Unknown &&
4522            "Widening decision should be ready at this moment");
4523     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4524       if (Ptr == Store->getValueOperand())
4525         return WideningDecision == CM_Scalarize;
4526     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4527            "Ptr is neither a value or pointer operand");
4528     return WideningDecision != CM_GatherScatter;
4529   };
4530 
4531   // A helper that returns true if the given value is a bitcast or
4532   // getelementptr instruction contained in the loop.
4533   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4534     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4535             isa<GetElementPtrInst>(V)) &&
4536            !TheLoop->isLoopInvariant(V);
4537   };
4538 
4539   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
4540     if (!isa<PHINode>(Ptr) ||
4541         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
4542       return false;
4543     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
4544     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
4545       return false;
4546     return isScalarUse(MemAccess, Ptr);
4547   };
4548 
4549   // A helper that evaluates a memory access's use of a pointer. If the
4550   // pointer is actually the pointer induction of a loop, it is being
4551   // inserted into Worklist. If the use will be a scalar use, and the
4552   // pointer is only used by memory accesses, we place the pointer in
4553   // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs.
4554   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4555     if (isScalarPtrInduction(MemAccess, Ptr)) {
4556       Worklist.insert(cast<Instruction>(Ptr));
4557       Instruction *Update = cast<Instruction>(
4558           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
4559       Worklist.insert(Update);
4560       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
4561                         << "\n");
4562       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
4563                         << "\n");
4564       return;
4565     }
4566     // We only care about bitcast and getelementptr instructions contained in
4567     // the loop.
4568     if (!isLoopVaryingBitCastOrGEP(Ptr))
4569       return;
4570 
4571     // If the pointer has already been identified as scalar (e.g., if it was
4572     // also identified as uniform), there's nothing to do.
4573     auto *I = cast<Instruction>(Ptr);
4574     if (Worklist.count(I))
4575       return;
4576 
4577     // If the use of the pointer will be a scalar use, and all users of the
4578     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4579     // place the pointer in PossibleNonScalarPtrs.
4580     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4581           return isa<LoadInst>(U) || isa<StoreInst>(U);
4582         }))
4583       ScalarPtrs.insert(I);
4584     else
4585       PossibleNonScalarPtrs.insert(I);
4586   };
4587 
4588   // We seed the scalars analysis with two classes of instructions: (1)
4589   // instructions marked uniform-after-vectorization and (2) bitcast,
4590   // getelementptr and (pointer) phi instructions used by memory accesses
4591   // requiring a scalar use.
4592   //
4593   // (1) Add to the worklist all instructions that have been identified as
4594   // uniform-after-vectorization.
4595   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4596 
4597   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4598   // memory accesses requiring a scalar use. The pointer operands of loads and
4599   // stores will be scalar as long as the memory access is not a gather or
4600   // scatter operation. The value operand of a store will remain scalar if the
4601   // store is scalarized.
4602   for (auto *BB : TheLoop->blocks())
4603     for (auto &I : *BB) {
4604       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4605         evaluatePtrUse(Load, Load->getPointerOperand());
4606       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4607         evaluatePtrUse(Store, Store->getPointerOperand());
4608         evaluatePtrUse(Store, Store->getValueOperand());
4609       }
4610     }
4611   for (auto *I : ScalarPtrs)
4612     if (!PossibleNonScalarPtrs.count(I)) {
4613       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4614       Worklist.insert(I);
4615     }
4616 
4617   // Insert the forced scalars.
4618   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4619   // induction variable when the PHI user is scalarized.
4620   auto ForcedScalar = ForcedScalars.find(VF);
4621   if (ForcedScalar != ForcedScalars.end())
4622     for (auto *I : ForcedScalar->second)
4623       Worklist.insert(I);
4624 
4625   // Expand the worklist by looking through any bitcasts and getelementptr
4626   // instructions we've already identified as scalar. This is similar to the
4627   // expansion step in collectLoopUniforms(); however, here we're only
4628   // expanding to include additional bitcasts and getelementptr instructions.
4629   unsigned Idx = 0;
4630   while (Idx != Worklist.size()) {
4631     Instruction *Dst = Worklist[Idx++];
4632     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4633       continue;
4634     auto *Src = cast<Instruction>(Dst->getOperand(0));
4635     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4636           auto *J = cast<Instruction>(U);
4637           return !TheLoop->contains(J) || Worklist.count(J) ||
4638                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4639                   isScalarUse(J, Src));
4640         })) {
4641       Worklist.insert(Src);
4642       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4643     }
4644   }
4645 
4646   // An induction variable will remain scalar if all users of the induction
4647   // variable and induction variable update remain scalar.
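  // For example, if an induction variable i is used only to address scalarized
  // memory accesses and by its update i = i + 1, both i and the update remain
  // scalar.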
4648   for (auto &Induction : Legal->getInductionVars()) {
4649     auto *Ind = Induction.first;
4650     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4651 
4652     // If tail-folding is applied, the primary induction variable will be used
4653     // to feed a vector compare.
4654     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4655       continue;
4656 
4657     // Determine if all users of the induction variable are scalar after
4658     // vectorization.
4659     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4660       auto *I = cast<Instruction>(U);
4661       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
4662     });
4663     if (!ScalarInd)
4664       continue;
4665 
4666     // Determine if all users of the induction variable update instruction are
4667     // scalar after vectorization.
4668     auto ScalarIndUpdate =
4669         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4670           auto *I = cast<Instruction>(U);
4671           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
4672         });
4673     if (!ScalarIndUpdate)
4674       continue;
4675 
4676     // The induction variable and its update instruction will remain scalar.
4677     Worklist.insert(Ind);
4678     Worklist.insert(IndUpdate);
4679     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4680     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4681                       << "\n");
4682   }
4683 
4684   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4685 }
4686 
4687 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, unsigned VF) {
4688   if (!blockNeedsPredication(I->getParent()))
4689     return false;
4690   switch(I->getOpcode()) {
4691   default:
4692     break;
4693   case Instruction::Load:
4694   case Instruction::Store: {
4695     if (!Legal->isMaskRequired(I))
4696       return false;
4697     auto *Ptr = getLoadStorePointerOperand(I);
4698     auto *Ty = getMemInstValueType(I);
4699     // We have already decided how to vectorize this instruction, get that
4700     // result.
4701     if (VF > 1) {
4702       InstWidening WideningDecision = getWideningDecision(I, VF);
4703       assert(WideningDecision != CM_Unknown &&
4704              "Widening decision should be ready at this moment");
4705       return WideningDecision == CM_Scalarize;
4706     }
4707     const Align Alignment = getLoadStoreAlignment(I);
4708     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4709                                 isLegalMaskedGather(Ty, Alignment))
4710                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4711                                 isLegalMaskedScatter(Ty, Alignment));
4712   }
4713   case Instruction::UDiv:
4714   case Instruction::SDiv:
4715   case Instruction::SRem:
4716   case Instruction::URem:
4717     return mayDivideByZero(*I);
4718   }
4719   return false;
4720 }
4721 
4722 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I,
4723                                                                unsigned VF) {
4724   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4725   assert(getWideningDecision(I, VF) == CM_Unknown &&
4726          "Decision should not be set yet.");
4727   auto *Group = getInterleavedAccessGroup(I);
4728   assert(Group && "Must have a group.");
4729 
4730   // If the instruction's allocated size doesn't equal its type size, it
4731   // requires padding and will be scalarized.
4732   auto &DL = I->getModule()->getDataLayout();
4733   auto *ScalarTy = getMemInstValueType(I);
4734   if (hasIrregularType(ScalarTy, DL, VF))
4735     return false;
4736 
4737   // Check if masking is required.
4738   // A Group may need masking for one of two reasons: it resides in a block that
4739   // needs predication, or it was decided to use masking to deal with gaps.
4740   bool PredicatedAccessRequiresMasking =
4741       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
4742   bool AccessWithGapsRequiresMasking =
4743       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
4744   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
4745     return true;
4746 
4747   // If masked interleaving is required, we expect that the user/target had
4748   // enabled it, because otherwise it either wouldn't have been created or
4749   // it should have been invalidated by the CostModel.
4750   assert(useMaskedInterleavedAccesses(TTI) &&
4751          "Masked interleave-groups for predicated accesses are not enabled.");
4752 
4753   auto *Ty = getMemInstValueType(I);
4754   const Align Alignment = getLoadStoreAlignment(I);
4755   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4756                           : TTI.isLegalMaskedStore(Ty, Alignment);
4757 }
4758 
4759 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I,
4760                                                                unsigned VF) {
4761   // Get and ensure we have a valid memory instruction.
4762   LoadInst *LI = dyn_cast<LoadInst>(I);
4763   StoreInst *SI = dyn_cast<StoreInst>(I);
4764   assert((LI || SI) && "Invalid memory instruction");
4765 
4766   auto *Ptr = getLoadStorePointerOperand(I);
4767 
4768   // To be widened, the pointer must first of all be consecutive.
4769   if (!Legal->isConsecutivePtr(Ptr))
4770     return false;
4771 
4772   // If the instruction is a store located in a predicated block, it will be
4773   // scalarized.
4774   if (isScalarWithPredication(I))
4775     return false;
4776 
4777   // If the instruction's allocated size doesn't equal its type size, it
4778   // requires padding and will be scalarized.
4779   auto &DL = I->getModule()->getDataLayout();
4780   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
4781   if (hasIrregularType(ScalarTy, DL, VF))
4782     return false;
4783 
4784   return true;
4785 }
4786 
4787 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
4788   // We should not collect Uniforms more than once per VF. Right now,
4789   // this function is called from collectUniformsAndScalars(), which
4790   // already does this check. Collecting Uniforms for VF=1 does not make any
4791   // sense.
4792 
4793   assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() &&
4794          "This function should not be visited twice for the same VF");
4795 
4796   // Visit the list of Uniforms. If we do not find any uniform value, we will
4797   // not analyze it again; Uniforms.count(VF) will still return 1.
4798   Uniforms[VF].clear();
4799 
4800   // We now know that the loop is vectorizable!
4801   // Collect instructions inside the loop that will remain uniform after
4802   // vectorization.
4803 
4804   // Global values, params and instructions outside of the current loop are out
4805   // of scope.
4805   // scope.
4806   auto isOutOfScope = [&](Value *V) -> bool {
4807     Instruction *I = dyn_cast<Instruction>(V);
4808     return (!I || !TheLoop->contains(I));
4809   };
4810 
4811   SetVector<Instruction *> Worklist;
4812   BasicBlock *Latch = TheLoop->getLoopLatch();
4813 
4814   // Instructions that are scalar with predication must not be considered
4815   // uniform after vectorization, because that would create an erroneous
4816   // replicating region where only a single instance out of VF should be formed.
4817   // TODO: optimize such seldom cases if found important, see PR40816.
4818   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
4819     if (isScalarWithPredication(I, VF)) {
4820       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
4821                         << *I << "\n");
4822       return;
4823     }
4824     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
4825     Worklist.insert(I);
4826   };
4827 
4828   // Start with the conditional branch. If the branch condition is an
4829   // instruction contained in the loop that is only used by the branch, it is
4830   // uniform.
4831   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4832   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
4833     addToWorklistIfAllowed(Cmp);
4834 
4835   // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
4836   // are pointers that are treated like consecutive pointers during
4837   // vectorization. The pointer operands of interleaved accesses are an
4838   // example.
4839   SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
4840 
4841   // Holds pointer operands of instructions that are possibly non-uniform.
4842   SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
4843 
4844   auto isUniformDecision = [&](Instruction *I, unsigned VF) {
4845     InstWidening WideningDecision = getWideningDecision(I, VF);
4846     assert(WideningDecision != CM_Unknown &&
4847            "Widening decision should be ready at this moment");
4848 
4849     return (WideningDecision == CM_Widen ||
4850             WideningDecision == CM_Widen_Reverse ||
4851             WideningDecision == CM_Interleave);
4852   };
4853   // Iterate over the instructions in the loop, and collect all
4854   // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
4855   // that a consecutive-like pointer operand will be scalarized, we collect it
4856   // in PossibleNonUniformPtrs instead. We use two sets here because a single
4857   // getelementptr instruction can be used by both vectorized and scalarized
4858   // memory instructions. For example, if a loop loads and stores from the same
4859   // location, but the store is conditional, the store will be scalarized, and
4860   // the getelementptr won't remain uniform.
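  // For example (illustrative IR):
  //   %gep = getelementptr i32, i32* %A, i64 %iv
  //   %v   = load i32, i32* %gep
  //   store i32 %x, i32* %gep      ; in a conditionally executed block
  // The conditional store is scalarized, so %gep is placed in
  // PossibleNonUniformPtrs and does not remain uniform, even though the load
  // alone would have kept it consecutive-like.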
4861   for (auto *BB : TheLoop->blocks())
4862     for (auto &I : *BB) {
4863       // If there's no pointer operand, there's nothing to do.
4864       auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
4865       if (!Ptr)
4866         continue;
4867 
4868       // True if all users of Ptr are memory accesses that have Ptr as their
4869       // pointer operand.
4870       auto UsersAreMemAccesses =
4871           llvm::all_of(Ptr->users(), [&](User *U) -> bool {
4872             return getLoadStorePointerOperand(U) == Ptr;
4873           });
4874 
4875       // Ensure the memory instruction will not be scalarized or used by
4876       // gather/scatter, making its pointer operand non-uniform. If the pointer
4877       // operand is used by any instruction other than a memory access, we
4878       // conservatively assume the pointer operand may be non-uniform.
4879       if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
4880         PossibleNonUniformPtrs.insert(Ptr);
4881 
      // If the memory instruction will be vectorized and its pointer operand
      // is consecutive-like or part of an interleave group, the pointer
      // operand should remain uniform.
4885       else
4886         ConsecutiveLikePtrs.insert(Ptr);
4887     }
4888 
4889   // Add to the Worklist all consecutive and consecutive-like pointers that
4890   // aren't also identified as possibly non-uniform.
4891   for (auto *V : ConsecutiveLikePtrs)
4892     if (!PossibleNonUniformPtrs.count(V))
4893       addToWorklistIfAllowed(V);
4894 
  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // a uniform instruction will only be used by uniform instructions.
4898   unsigned idx = 0;
4899   while (idx != Worklist.size()) {
4900     Instruction *I = Worklist[idx++];
4901 
4902     for (auto OV : I->operand_values()) {
4903       // isOutOfScope operands cannot be uniform instructions.
4904       if (isOutOfScope(OV))
4905         continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
4908       auto *OP = dyn_cast<PHINode>(OV);
4909       if (OP && Legal->isFirstOrderRecurrence(OP))
4910         continue;
4911       // If all the users of the operand are uniform, then add the
4912       // operand into the uniform worklist.
4913       auto *OI = cast<Instruction>(OV);
4914       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
4915             auto *J = cast<Instruction>(U);
4916             return Worklist.count(J) ||
4917                    (OI == getLoadStorePointerOperand(J) &&
4918                     isUniformDecision(J, VF));
4919           }))
4920         addToWorklistIfAllowed(OI);
4921     }
4922   }
4923 
4924   // Returns true if Ptr is the pointer operand of a memory access instruction
4925   // I, and I is known to not require scalarization.
4926   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
4927     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
4928   };
4929 
4930   // For an instruction to be added into Worklist above, all its users inside
4931   // the loop should also be in Worklist. However, this condition cannot be
4932   // true for phi nodes that form a cyclic dependence. We must process phi
4933   // nodes separately. An induction variable will remain uniform if all users
4934   // of the induction variable and induction variable update remain uniform.
4935   // The code below handles both pointer and non-pointer induction variables.
4936   for (auto &Induction : Legal->getInductionVars()) {
4937     auto *Ind = Induction.first;
4938     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4939 
4940     // Determine if all users of the induction variable are uniform after
4941     // vectorization.
4942     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4943       auto *I = cast<Instruction>(U);
4944       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4945              isVectorizedMemAccessUse(I, Ind);
4946     });
4947     if (!UniformInd)
4948       continue;
4949 
4950     // Determine if all users of the induction variable update instruction are
4951     // uniform after vectorization.
4952     auto UniformIndUpdate =
4953         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4954           auto *I = cast<Instruction>(U);
4955           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4956                  isVectorizedMemAccessUse(I, IndUpdate);
4957         });
4958     if (!UniformIndUpdate)
4959       continue;
4960 
4961     // The induction variable and its update instruction will remain uniform.
4962     addToWorklistIfAllowed(Ind);
4963     addToWorklistIfAllowed(IndUpdate);
4964   }
4965 
4966   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
4967 }
4968 
4969 bool LoopVectorizationCostModel::runtimeChecksRequired() {
4970   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
4971 
4972   if (Legal->getRuntimePointerChecking()->Need) {
4973     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
4974         "runtime pointer checks needed. Enable vectorization of this "
4975         "loop with '#pragma clang loop vectorize(enable)' when "
4976         "compiling with -Os/-Oz",
4977         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4978     return true;
4979   }
4980 
4981   if (!PSE.getUnionPredicate().getPredicates().empty()) {
4982     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
4983         "runtime SCEV checks needed. Enable vectorization of this "
4984         "loop with '#pragma clang loop vectorize(enable)' when "
4985         "compiling with -Os/-Oz",
4986         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4987     return true;
4988   }
4989 
4990   // FIXME: Avoid specializing for stride==1 instead of bailing out.
4991   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
4992     reportVectorizationFailure("Runtime stride check for small trip count",
4993         "runtime stride == 1 checks needed. Enable vectorization of "
4994         "this loop without such check by compiling with -Os/-Oz",
4995         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4996     return true;
4997   }
4998 
4999   return false;
5000 }
5001 
5002 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(unsigned UserVF,
5003                                                             unsigned UserIC) {
5004   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this anyway, since the check is still
    // likely to be dynamically uniform if the target can skip it.
5007     reportVectorizationFailure(
5008         "Not inserting runtime ptr check for divergent target",
5009         "runtime pointer checks needed. Not enabled for divergent target",
5010         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5011     return None;
5012   }
5013 
5014   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5015   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5016   if (TC == 1) {
5017     reportVectorizationFailure("Single iteration (non) loop",
5018         "loop trip count is one, irrelevant for vectorization",
5019         "SingleIterationLoop", ORE, TheLoop);
5020     return None;
5021   }
5022 
5023   switch (ScalarEpilogueStatus) {
5024   case CM_ScalarEpilogueAllowed:
5025     return UserVF ? UserVF : computeFeasibleMaxVF(TC);
5026   case CM_ScalarEpilogueNotNeededUsePredicate:
5027     LLVM_DEBUG(
5028         dbgs() << "LV: vector predicate hint/switch found.\n"
5029                << "LV: Not allowing scalar epilogue, creating predicated "
5030                << "vector loop.\n");
5031     break;
5032   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5033     // fallthrough as a special case of OptForSize
5034   case CM_ScalarEpilogueNotAllowedOptSize:
5035     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5036       LLVM_DEBUG(
5037           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5038     else
5039       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5040                         << "count.\n");
5041 
    // Bail if runtime checks are required; they inflate code size, which is
    // undesirable when optimizing for size.
5044     if (runtimeChecksRequired())
5045       return None;
5046     break;
5047   }
5048 
5049   // Now try the tail folding
5050 
5051   // Invalidate interleave groups that require an epilogue if we can't mask
5052   // the interleave-group.
5053   if (!useMaskedInterleavedAccesses(TTI)) {
5054     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5055            "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
5058     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5059   }
5060 
5061   unsigned MaxVF = UserVF ? UserVF : computeFeasibleMaxVF(TC);
5062   assert((UserVF || isPowerOf2_32(MaxVF)) && "MaxVF must be a power of 2");
5063   unsigned MaxVFtimesIC = UserIC ? MaxVF * UserIC : MaxVF;
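  // For example, with TC = 64, MaxVF = 8 and UserIC = 2, MaxVFtimesIC is 16
  // and 64 % 16 == 0, so no tail remains and MaxVF is accepted.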
5064   if (TC > 0 && TC % MaxVFtimesIC == 0) {
5065     // Accept MaxVF if we do not have a tail.
5066     LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5067     return MaxVF;
5068   }
5069 
5070   // If we don't know the precise trip count, or if the trip count that we
5071   // found modulo the vectorization factor is not zero, try to fold the tail
5072   // by masking.
5073   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5074   if (Legal->prepareToFoldTailByMasking()) {
5075     FoldTailByMasking = true;
5076     return MaxVF;
5077   }
5078 
5079   if (TC == 0) {
5080     reportVectorizationFailure(
5081         "Unable to calculate the loop count due to complex control flow",
5082         "unable to calculate the loop count due to complex control flow",
5083         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5084     return None;
5085   }
5086 
5087   reportVectorizationFailure(
5088       "Cannot optimize for size and vectorize at the same time.",
5089       "cannot optimize for size and vectorize at the same time. "
5090       "Enable vectorization of this loop with '#pragma clang loop "
5091       "vectorize(enable)' when compiling with -Os/-Oz",
5092       "NoTailLoopWithOptForSize", ORE, TheLoop);
5093   return None;
5094 }
5095 
5096 unsigned
5097 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount) {
5098   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5099   unsigned SmallestType, WidestType;
5100   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5101   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
5102 
5103   // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
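  // For example, if the most restrictive dependence allows at most 4
  // concurrent iterations of an i32 access, MaxSafeRegisterWidth is
  // 4 * 4 * 8 = 128 bits.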
5107   unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();
5108 
5109   WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);
5110 
  // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
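  // E.g., with 256-bit registers and a widest type of 64 bits, this yields
  // PowerOf2Floor(256 / 64) = 4.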
5113   unsigned MaxVectorSize = PowerOf2Floor(WidestRegister / WidestType);
5114 
5115   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5116                     << " / " << WidestType << " bits.\n");
5117   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5118                     << WidestRegister << " bits.\n");
5119 
5120   assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements"
5121                                  " into one vector!");
5122   if (MaxVectorSize == 0) {
5123     LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
5124     MaxVectorSize = 1;
5125     return MaxVectorSize;
5126   } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
5127              isPowerOf2_32(ConstTripCount)) {
5128     // We need to clamp the VF to be the ConstTripCount. There is no point in
5129     // choosing a higher viable VF as done in the loop below.
5130     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5131                       << ConstTripCount << "\n");
5132     MaxVectorSize = ConstTripCount;
5133     return MaxVectorSize;
5134   }
5135 
5136   unsigned MaxVF = MaxVectorSize;
5137   if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
5138       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5139     // Collect all viable vectorization factors larger than the default MaxVF
5140     // (i.e. MaxVectorSize).
5141     SmallVector<unsigned, 8> VFs;
5142     unsigned NewMaxVectorSize = WidestRegister / SmallestType;
5143     for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
5144       VFs.push_back(VS);
5145 
5146     // For each VF calculate its register usage.
5147     auto RUs = calculateRegisterUsage(VFs);
5148 
5149     // Select the largest VF which doesn't require more registers than existing
5150     // ones.
5151     for (int i = RUs.size() - 1; i >= 0; --i) {
5152       bool Selected = true;
5153       for (auto& pair : RUs[i].MaxLocalUsers) {
5154         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5155         if (pair.second > TargetNumRegisters)
5156           Selected = false;
5157       }
5158       if (Selected) {
5159         MaxVF = VFs[i];
5160         break;
5161       }
5162     }
5163     if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
5164       if (MaxVF < MinVF) {
5165         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5166                           << ") with target's minimum: " << MinVF << '\n');
5167         MaxVF = MinVF;
5168       }
5169     }
5170   }
5171   return MaxVF;
5172 }
5173 
5174 VectorizationFactor
5175 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
5176   float Cost = expectedCost(1).first;
5177   const float ScalarCost = Cost;
5178   unsigned Width = 1;
5179   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
5180 
5181   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5182   if (ForceVectorization && MaxVF > 1) {
5183     // Ignore scalar width, because the user explicitly wants vectorization.
5184     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5185     // evaluation.
5186     Cost = std::numeric_limits<float>::max();
5187   }
5188 
5189   for (unsigned i = 2; i <= MaxVF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
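    // For example, a vector loop of width 4 with an expected cost of 20 has a
    // per-lane cost of 5, which is then compared against the best cost so far.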
5193     VectorizationCostTy C = expectedCost(i);
5194     float VectorCost = C.first / (float)i;
5195     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5196                       << " costs: " << (int)VectorCost << ".\n");
5197     if (!C.second && !ForceVectorization) {
5198       LLVM_DEBUG(
5199           dbgs() << "LV: Not considering vector loop of width " << i
5200                  << " because it will not generate any vector instructions.\n");
5201       continue;
5202     }
5203     if (VectorCost < Cost) {
5204       Cost = VectorCost;
5205       Width = i;
5206     }
5207   }
5208 
5209   if (!EnableCondStoresVectorization && NumPredStores) {
5210     reportVectorizationFailure("There are conditional stores.",
5211         "store that is conditionally executed prevents vectorization",
5212         "ConditionalStore", ORE, TheLoop);
5213     Width = 1;
5214     Cost = ScalarCost;
5215   }
5216 
5217   LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
5218              << "LV: Vectorization seems to be not beneficial, "
5219              << "but was forced by a user.\n");
5220   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5221   VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
5222   return Factor;
5223 }
5224 
5225 std::pair<unsigned, unsigned>
5226 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5227   unsigned MinWidth = -1U;
5228   unsigned MaxWidth = 8;
5229   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5230 
5231   // For each block.
5232   for (BasicBlock *BB : TheLoop->blocks()) {
5233     // For each instruction in the loop.
5234     for (Instruction &I : BB->instructionsWithoutDebug()) {
5235       Type *T = I.getType();
5236 
5237       // Skip ignored values.
5238       if (ValuesToIgnore.count(&I))
5239         continue;
5240 
5241       // Only examine Loads, Stores and PHINodes.
5242       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5243         continue;
5244 
5245       // Examine PHI nodes that are reduction variables. Update the type to
5246       // account for the recurrence type.
5247       if (auto *PN = dyn_cast<PHINode>(&I)) {
5248         if (!Legal->isReductionVariable(PN))
5249           continue;
5250         RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
5251         T = RdxDesc.getRecurrenceType();
5252       }
5253 
5254       // Examine the stored values.
5255       if (auto *ST = dyn_cast<StoreInst>(&I))
5256         T = ST->getValueOperand()->getType();
5257 
5258       // Ignore loaded pointer types and stored pointer types that are not
5259       // vectorizable.
5260       //
5261       // FIXME: The check here attempts to predict whether a load or store will
5262       //        be vectorized. We only know this for certain after a VF has
5263       //        been selected. Here, we assume that if an access can be
5264       //        vectorized, it will be. We should also look at extending this
5265       //        optimization to non-pointer types.
5266       //
5267       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
5268           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
5269         continue;
5270 
5271       MinWidth = std::min(MinWidth,
5272                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5273       MaxWidth = std::max(MaxWidth,
5274                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5275     }
5276   }
5277 
5278   return {MinWidth, MaxWidth};
5279 }
5280 
5281 unsigned LoopVectorizationCostModel::selectInterleaveCount(unsigned VF,
5282                                                            unsigned LoopCost) {
5283   // -- The interleave heuristics --
5284   // We interleave the loop in order to expose ILP and reduce the loop overhead.
5285   // There are many micro-architectural considerations that we can't predict
5286   // at this level. For example, frontend pressure (on decode or fetch) due to
5287   // code size, or the number and capabilities of the execution ports.
5288   //
5289   // We use the following heuristics to select the interleave count:
5290   // 1. If the code has reductions, then we interleave to break the cross
5291   // iteration dependency.
5292   // 2. If the loop is really small, then we interleave to reduce the loop
5293   // overhead.
5294   // 3. We don't interleave if we think that we will spill registers to memory
5295   // due to the increased register pressure.
5296 
5297   if (!isScalarEpilogueAllowed())
5298     return 1;
5299 
  // The maximum safe dependence distance has already been used to limit the
  // vectorization factor, so do not interleave.
5301   if (Legal->getMaxSafeDepDistBytes() != -1U)
5302     return 1;
5303 
5304   // Do not interleave loops with a relatively small known or estimated trip
5305   // count.
5306   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
5307   if (BestKnownTC && *BestKnownTC < TinyTripCountInterleaveThreshold)
5308     return 1;
5309 
5310   RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these values below, so make sure each register class has at
  // least one live register to avoid dividing by zero.
5313   for (auto& pair : R.MaxLocalUsers) {
5314     pair.second = std::max(pair.second, 1U);
5315   }
5316 
5317   // We calculate the interleave count using the following formula.
5318   // Subtract the number of loop invariants from the number of available
5319   // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers required
  // by the loop, in order to estimate how many parallel instances fit without
  // causing spills. All of this is rounded down if necessary to be a power of
  // two. We want a power-of-two interleave count to simplify any addressing
  // operations and alignment considerations.
  // A power-of-two interleave count also ensures that the induction variable
  // of the vector loop wraps to zero when the tail is folded by masking; that
  // currently happens when optimizing for size, in which case an interleave
  // count of 1 has already been returned above.
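  // For example (illustrative numbers), with 16 registers in a class, 2 of
  // them holding loop-invariant values and at most 3 values live at once,
  // IC = PowerOf2Floor((16 - 2) / 3) = 4.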
5328   unsigned IC = UINT_MAX;
5329 
5330   for (auto& pair : R.MaxLocalUsers) {
5331     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5332     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
5333                       << " registers of "
5334                       << TTI.getRegisterClassName(pair.first) << " register class\n");
5335     if (VF == 1) {
5336       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5337         TargetNumRegisters = ForceTargetNumScalarRegs;
5338     } else {
5339       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5340         TargetNumRegisters = ForceTargetNumVectorRegs;
5341     }
5342     unsigned MaxLocalUsers = pair.second;
5343     unsigned LoopInvariantRegs = 0;
5344     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
5345       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
5346 
5347     unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
5348     // Don't count the induction variable as interleaved.
5349     if (EnableIndVarRegisterHeur) {
5350       TmpIC =
5351           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
5352                         std::max(1U, (MaxLocalUsers - 1)));
5353     }
5354 
5355     IC = std::min(IC, TmpIC);
5356   }
5357 
5358   // Clamp the interleave ranges to reasonable counts.
5359   unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
5360 
5361   // Check if the user has overridden the max.
5362   if (VF == 1) {
5363     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5364       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5365   } else {
5366     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5367       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5368   }
5369 
  // If the trip count is a known or estimated compile-time constant, limit the
  // interleave count to at most the trip count divided by VF.
5372   if (BestKnownTC) {
5373     MaxInterleaveCount = std::min(*BestKnownTC / VF, MaxInterleaveCount);
5374   }
5375 
5376   // If we did not calculate the cost for VF (because the user selected the VF)
5377   // then we calculate the cost of VF here.
5378   if (LoopCost == 0)
5379     LoopCost = expectedCost(VF).first;
5380 
5381   assert(LoopCost && "Non-zero loop cost expected");
5382 
  // Clamp the calculated IC to be between 1 and the maximum interleave count
  // that the target and trip count allow.
5385   if (IC > MaxInterleaveCount)
5386     IC = MaxInterleaveCount;
5387   else if (IC < 1)
5388     IC = 1;
5389 
5390   // Interleave if we vectorized this loop and there is a reduction that could
5391   // benefit from interleaving.
5392   if (VF > 1 && !Legal->getReductionVars().empty()) {
5393     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5394     return IC;
5395   }
5396 
5397   // Note that if we've already vectorized the loop we will have done the
5398   // runtime check and so interleaving won't require further checks.
5399   bool InterleavingRequiresRuntimePointerCheck =
5400       (VF == 1 && Legal->getRuntimePointerChecking()->Need);
5401 
5402   // We want to interleave small loops in order to reduce the loop overhead and
5403   // potentially expose ILP opportunities.
5404   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
5405   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the loop overhead costs 1. Using the cost model's
    // estimate of the loop body, interleave until the cost of the loop
    // overhead is about 5% of the cost of the loop.
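    // E.g., assuming the default SmallLoopCost of 20, a loop whose body costs
    // 4 is interleaved by at most PowerOf2Floor(20 / 4) = 4.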
5409     unsigned SmallIC =
5410         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
5411 
5412     // Interleave until store/load ports (estimated by max interleave count) are
5413     // saturated.
5414     unsigned NumStores = Legal->getNumStores();
5415     unsigned NumLoads = Legal->getNumLoads();
5416     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5417     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5418 
    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit it, by default, to 2,
    // so the critical path only gets increased by one reduction operation.
5423     if (!Legal->getReductionVars().empty() && TheLoop->getLoopDepth() > 1) {
5424       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5425       SmallIC = std::min(SmallIC, F);
5426       StoresIC = std::min(StoresIC, F);
5427       LoadsIC = std::min(LoadsIC, F);
5428     }
5429 
5430     if (EnableLoadStoreRuntimeInterleave &&
5431         std::max(StoresIC, LoadsIC) > SmallIC) {
5432       LLVM_DEBUG(
5433           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5434       return std::max(StoresIC, LoadsIC);
5435     }
5436 
5437     LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5438     return SmallIC;
5439   }
5440 
5441   // Interleave if this is a large loop (small loops are already dealt with by
5442   // this point) that could benefit from interleaving.
5443   bool HasReductions = !Legal->getReductionVars().empty();
5444   if (TTI.enableAggressiveInterleaving(HasReductions)) {
5445     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5446     return IC;
5447   }
5448 
5449   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
5450   return 1;
5451 }
5452 
5453 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5454 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
5455   // This function calculates the register usage by measuring the highest number
5456   // of values that are alive at a single location. Obviously, this is a very
  // rough estimation. We scan the loop in topological order and
5458   // assign a number to each instruction. We use RPO to ensure that defs are
5459   // met before their users. We assume that each instruction that has in-loop
5460   // users starts an interval. We record every time that an in-loop value is
5461   // used, so we have a list of the first and last occurrences of each
5462   // instruction. Next, we transpose this data structure into a multi map that
5463   // holds the list of intervals that *end* at a specific location. This multi
5464   // map allows us to perform a linear search. We scan the instructions linearly
5465   // and record each time that a new interval starts, by placing it in a set.
5466   // If we find this value in the multi-map then we remove it from the set.
5467   // The max register usage is the maximum size of the set.
5468   // We also search for instructions that are defined outside the loop, but are
5469   // used inside the loop. We need this number separately from the max-interval
  // usage number because, when we unroll, loop-invariant values do not take
  // more registers.
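  // For example (illustrative), in the straight-line sequence
  //   %a = add ...
  //   %b = mul %a, ...
  //   %c = sub %b, ...
  // the interval for %a ends at its last use in %b and the interval for %b
  // ends in %c, so the maximum recorded number of open intervals here is one.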
5472   LoopBlocksDFS DFS(TheLoop);
5473   DFS.perform(LI);
5474 
5475   RegisterUsage RU;
5476 
5477   // Each 'key' in the map opens a new interval. The values
5478   // of the map are the index of the 'last seen' usage of the
5479   // instruction that is the key.
5480   using IntervalMap = DenseMap<Instruction *, unsigned>;
5481 
5482   // Maps instruction to its index.
5483   SmallVector<Instruction *, 64> IdxToInstr;
5484   // Marks the end of each interval.
5485   IntervalMap EndPoint;
  // Saves the set of instructions that are used within the loop.
5487   SmallPtrSet<Instruction *, 8> Ends;
5488   // Saves the list of values that are used in the loop but are
5489   // defined outside the loop, such as arguments and constants.
5490   SmallPtrSet<Value *, 8> LoopInvariants;
5491 
5492   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
5493     for (Instruction &I : BB->instructionsWithoutDebug()) {
5494       IdxToInstr.push_back(&I);
5495 
5496       // Save the end location of each USE.
5497       for (Value *U : I.operands()) {
5498         auto *Instr = dyn_cast<Instruction>(U);
5499 
5500         // Ignore non-instruction values such as arguments, constants, etc.
5501         if (!Instr)
5502           continue;
5503 
5504         // If this instruction is outside the loop then record it and continue.
5505         if (!TheLoop->contains(Instr)) {
5506           LoopInvariants.insert(Instr);
5507           continue;
5508         }
5509 
5510         // Overwrite previous end points.
5511         EndPoint[Instr] = IdxToInstr.size();
5512         Ends.insert(Instr);
5513       }
5514     }
5515   }
5516 
5517   // Saves the list of intervals that end with the index in 'key'.
5518   using InstrList = SmallVector<Instruction *, 2>;
5519   DenseMap<unsigned, InstrList> TransposeEnds;
5520 
5521   // Transpose the EndPoints to a list of values that end at each index.
5522   for (auto &Interval : EndPoint)
5523     TransposeEnds[Interval.second].push_back(Interval.first);
5524 
5525   SmallPtrSet<Instruction *, 8> OpenIntervals;
5526 
5527   // Get the size of the widest register.
5528   unsigned MaxSafeDepDist = -1U;
5529   if (Legal->getMaxSafeDepDistBytes() != -1U)
5530     MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
5531   unsigned WidestRegister =
5532       std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
5533   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5534 
5535   SmallVector<RegisterUsage, 8> RUs(VFs.size());
5536   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
5537 
5538   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
5539 
5540   // A lambda that gets the register usage for the given type and VF.
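  // E.g., for VF = 8 with a 32-bit element type and 128-bit wide registers,
  // it returns std::max(1u, 8 * 32 / 128) = 2.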
5541   auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
5542     if (Ty->isTokenTy())
5543       return 0U;
5544     unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
5545     return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
5546   };
5547 
5548   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
5549     Instruction *I = IdxToInstr[i];
5550 
5551     // Remove all of the instructions that end at this location.
5552     InstrList &List = TransposeEnds[i];
5553     for (Instruction *ToRemove : List)
5554       OpenIntervals.erase(ToRemove);
5555 
5556     // Ignore instructions that are never used within the loop.
5557     if (!Ends.count(I))
5558       continue;
5559 
5560     // Skip ignored values.
5561     if (ValuesToIgnore.count(I))
5562       continue;
5563 
5564     // For each VF find the maximum usage of registers.
5565     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
5566       // Count the number of live intervals.
5567       SmallMapVector<unsigned, unsigned, 4> RegUsage;
5568 
5569       if (VFs[j] == 1) {
5570         for (auto Inst : OpenIntervals) {
5571           unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
5572           if (RegUsage.find(ClassID) == RegUsage.end())
5573             RegUsage[ClassID] = 1;
5574           else
5575             RegUsage[ClassID] += 1;
5576         }
5577       } else {
5578         collectUniformsAndScalars(VFs[j]);
5579         for (auto Inst : OpenIntervals) {
5580           // Skip ignored values for VF > 1.
5581           if (VecValuesToIgnore.count(Inst))
5582             continue;
5583           if (isScalarAfterVectorization(Inst, VFs[j])) {
5584             unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
5585             if (RegUsage.find(ClassID) == RegUsage.end())
5586               RegUsage[ClassID] = 1;
5587             else
5588               RegUsage[ClassID] += 1;
5589           } else {
5590             unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
5591             if (RegUsage.find(ClassID) == RegUsage.end())
5592               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
5593             else
5594               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
5595           }
5596         }
5597       }
5598 
5599       for (auto& pair : RegUsage) {
5600         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
5601           MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
5602         else
5603           MaxUsages[j][pair.first] = pair.second;
5604       }
5605     }
5606 
5607     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
5608                       << OpenIntervals.size() << '\n');
5609 
5610     // Add the current instruction to the list of open intervals.
5611     OpenIntervals.insert(I);
5612   }
5613 
5614   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
5615     SmallMapVector<unsigned, unsigned, 4> Invariant;
5616 
5617     for (auto Inst : LoopInvariants) {
5618       unsigned Usage = VFs[i] == 1 ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
5619       unsigned ClassID = TTI.getRegisterClassForType(VFs[i] > 1, Inst->getType());
5620       if (Invariant.find(ClassID) == Invariant.end())
5621         Invariant[ClassID] = Usage;
5622       else
5623         Invariant[ClassID] += Usage;
5624     }
5625 
5626     LLVM_DEBUG({
5627       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
5628       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
5629              << " item\n";
5630       for (const auto &pair : MaxUsages[i]) {
5631         dbgs() << "LV(REG): RegisterClass: "
5632                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
5633                << " registers\n";
5634       }
5635       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
5636              << " item\n";
5637       for (const auto &pair : Invariant) {
5638         dbgs() << "LV(REG): RegisterClass: "
5639                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
5640                << " registers\n";
5641       }
5642     });
5643 
5644     RU.LoopInvariantRegs = Invariant;
5645     RU.MaxLocalUsers = MaxUsages[i];
5646     RUs[i] = RU;
5647   }
5648 
5649   return RUs;
5650 }
5651 
5652 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){
5653   // TODO: Cost model for emulated masked load/store is completely
5654   // broken. This hack guides the cost model to use an artificially
5655   // high enough value to practically disable vectorization with such
5656   // operations, except where previously deployed legality hack allowed
5657   // using very low cost values. This is to avoid regressions coming simply
5658   // from moving "masked load/store" check from legality to cost model.
  // Masked Load/Gather emulation was previously never allowed.
  // A limited amount of Masked Store/Scatter emulation was allowed.
5661   assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
5662   return isa<LoadInst>(I) ||
5663          (isa<StoreInst>(I) &&
5664           NumPredStores > NumberOfStoresToPredicate);
5665 }
5666 
5667 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
5668   // If we aren't vectorizing the loop, or if we've already collected the
5669   // instructions to scalarize, there's nothing to do. Collection may already
5670   // have occurred if we have a user-selected VF and are now computing the
5671   // expected cost for interleaving.
5672   if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end())
5673     return;
5674 
  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
5676   // not profitable to scalarize any instructions, the presence of VF in the
5677   // map will indicate that we've analyzed it already.
5678   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
5679 
5680   // Find all the instructions that are scalar with predication in the loop and
5681   // determine if it would be better to not if-convert the blocks they are in.
5682   // If so, we also record the instructions to scalarize.
5683   for (BasicBlock *BB : TheLoop->blocks()) {
5684     if (!blockNeedsPredication(BB))
5685       continue;
5686     for (Instruction &I : *BB)
5687       if (isScalarWithPredication(&I)) {
5688         ScalarCostsTy ScalarCosts;
5689         // Do not apply discount logic if hacked cost is needed
5690         // for emulated masked memrefs.
5691         if (!useEmulatedMaskMemRefHack(&I) &&
5692             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
5693           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
5694         // Remember that BB will remain after vectorization.
5695         PredicatedBBsAfterVectorization.insert(BB);
5696       }
5697   }
5698 }
5699 
5700 int LoopVectorizationCostModel::computePredInstDiscount(
5701     Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
5702     unsigned VF) {
5703   assert(!isUniformAfterVectorization(PredInst, VF) &&
5704          "Instruction marked uniform-after-vectorization will be predicated");
5705 
5706   // Initialize the discount to zero, meaning that the scalar version and the
5707   // vector version cost the same.
5708   int Discount = 0;
5709 
5710   // Holds instructions to analyze. The instructions we visit are mapped in
5711   // ScalarCosts. Those instructions are the ones that would be scalarized if
5712   // we find that the scalar version costs less.
5713   SmallVector<Instruction *, 8> Worklist;
5714 
5715   // Returns true if the given instruction can be scalarized.
5716   auto canBeScalarized = [&](Instruction *I) -> bool {
5717     // We only attempt to scalarize instructions forming a single-use chain
5718     // from the original predicated block that would otherwise be vectorized.
5719     // Although not strictly necessary, we give up on instructions we know will
5720     // already be scalar to avoid traversing chains that are unlikely to be
5721     // beneficial.
5722     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
5723         isScalarAfterVectorization(I, VF))
5724       return false;
5725 
5726     // If the instruction is scalar with predication, it will be analyzed
5727     // separately. We ignore it within the context of PredInst.
5728     if (isScalarWithPredication(I))
5729       return false;
5730 
5731     // If any of the instruction's operands are uniform after vectorization,
5732     // the instruction cannot be scalarized. This prevents, for example, a
5733     // masked load from being scalarized.
5734     //
5735     // We assume we will only emit a value for lane zero of an instruction
5736     // marked uniform after vectorization, rather than VF identical values.
5737     // Thus, if we scalarize an instruction that uses a uniform, we would
5738     // create uses of values corresponding to the lanes we aren't emitting code
5739     // for. This behavior can be changed by allowing getScalarValue to clone
5740     // the lane zero values for uniforms rather than asserting.
5741     for (Use &U : I->operands())
5742       if (auto *J = dyn_cast<Instruction>(U.get()))
5743         if (isUniformAfterVectorization(J, VF))
5744           return false;
5745 
5746     // Otherwise, we can scalarize the instruction.
5747     return true;
5748   };
5749 
5750   // Compute the expected cost discount from scalarizing the entire expression
5751   // feeding the predicated instruction. We currently only consider expressions
5752   // that are single-use instruction chains.
5753   Worklist.push_back(PredInst);
5754   while (!Worklist.empty()) {
5755     Instruction *I = Worklist.pop_back_val();
5756 
5757     // If we've already analyzed the instruction, there's nothing to do.
5758     if (ScalarCosts.find(I) != ScalarCosts.end())
5759       continue;
5760 
5761     // Compute the cost of the vector instruction. Note that this cost already
5762     // includes the scalarization overhead of the predicated instruction.
5763     unsigned VectorCost = getInstructionCost(I, VF).first;
5764 
5765     // Compute the cost of the scalarized instruction. This cost is the cost of
5766     // the instruction as if it wasn't if-converted and instead remained in the
5767     // predicated block. We will scale this cost by block probability after
5768     // computing the scalarization overhead.
5769     unsigned ScalarCost = VF * getInstructionCost(I, 1).first;
5770 
5771     // Compute the scalarization overhead of needed insertelement instructions
5772     // and phi nodes.
5773     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
5774       ScalarCost += TTI.getScalarizationOverhead(
5775           cast<VectorType>(ToVectorTy(I->getType(), VF)),
5776           APInt::getAllOnesValue(VF), true, false);
5777       ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI,
5778                                             TTI::TCK_RecipThroughput);
5779     }
5780 
5781     // Compute the scalarization overhead of needed extractelement
5782     // instructions. For each of the instruction's operands, if the operand can
5783     // be scalarized, add it to the worklist; otherwise, account for the
5784     // overhead.
5785     for (Use &U : I->operands())
5786       if (auto *J = dyn_cast<Instruction>(U.get())) {
5787         assert(VectorType::isValidElementType(J->getType()) &&
5788                "Instruction has non-scalar type");
5789         if (canBeScalarized(J))
5790           Worklist.push_back(J);
5791         else if (needsExtract(J, VF))
5792           ScalarCost += TTI.getScalarizationOverhead(
5793               cast<VectorType>(ToVectorTy(J->getType(), VF)),
5794               APInt::getAllOnesValue(VF), false, true);
5795       }
5796 
5797     // Scale the total scalar cost by block probability.
5798     ScalarCost /= getReciprocalPredBlockProb();
5799 
5800     // Compute the discount. A non-negative discount means the vector version
5801     // of the instruction costs more, and scalarizing would be beneficial.
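    // For example, if VectorCost is 8 and the scaled ScalarCost is 6, the
    // discount grows by 2, favoring scalarization of this chain.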
5802     Discount += VectorCost - ScalarCost;
5803     ScalarCosts[I] = ScalarCost;
5804   }
5805 
5806   return Discount;
5807 }
5808 
5809 LoopVectorizationCostModel::VectorizationCostTy
5810 LoopVectorizationCostModel::expectedCost(unsigned VF) {
5811   VectorizationCostTy Cost;
5812 
5813   // For each block.
5814   for (BasicBlock *BB : TheLoop->blocks()) {
5815     VectorizationCostTy BlockCost;
5816 
5817     // For each instruction in the old loop.
5818     for (Instruction &I : BB->instructionsWithoutDebug()) {
5819       // Skip ignored values.
5820       if (ValuesToIgnore.count(&I) || (VF > 1 && VecValuesToIgnore.count(&I)))
5821         continue;
5822 
5823       VectorizationCostTy C = getInstructionCost(&I, VF);
5824 
5825       // Check if we should override the cost.
5826       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
5827         C.first = ForceTargetInstructionCost;
5828 
5829       BlockCost.first += C.first;
5830       BlockCost.second |= C.second;
5831       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
5832                         << " for VF " << VF << " For instruction: " << I
5833                         << '\n');
5834     }
5835 
5836     // If we are vectorizing a predicated block, it will have been
5837     // if-converted. This means that the block's instructions (aside from
5838     // stores and instructions that may divide by zero) will now be
5839     // unconditionally executed. For the scalar case, we may not always execute
5840     // the predicated block. Thus, scale the block's cost by the probability of
5841     // executing it.
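    // E.g., with the default reciprocal predicated-block probability of 2
    // (i.e. a 50% chance of executing the block), the block's scalar cost is
    // halved.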
5842     if (VF == 1 && blockNeedsPredication(BB))
5843       BlockCost.first /= getReciprocalPredBlockProb();
5844 
5845     Cost.first += BlockCost.first;
5846     Cost.second |= BlockCost.second;
5847   }
5848 
5849   return Cost;
5850 }
5851 
5852 /// Gets Address Access SCEV after verifying that the access pattern
5853 /// is loop invariant except the induction variable dependence.
5854 ///
5855 /// This SCEV can be sent to the Target in order to estimate the address
5856 /// calculation cost.
5857 static const SCEV *getAddressAccessSCEV(
5858               Value *Ptr,
5859               LoopVectorizationLegality *Legal,
5860               PredicatedScalarEvolution &PSE,
5861               const Loop *TheLoop) {
5862 
5863   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
5864   if (!Gep)
5865     return nullptr;
5866 
5867   // We are looking for a gep with all loop invariant indices except for one
5868   // which should be an induction variable.
5869   auto SE = PSE.getSE();
5870   unsigned NumOperands = Gep->getNumOperands();
5871   for (unsigned i = 1; i < NumOperands; ++i) {
5872     Value *Opd = Gep->getOperand(i);
5873     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
5874         !Legal->isInductionVariable(Opd))
5875       return nullptr;
5876   }
5877 
  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
5879   return PSE.getSCEV(Ptr);
5880 }
5881 
5882 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
5883   return Legal->hasStride(I->getOperand(0)) ||
5884          Legal->hasStride(I->getOperand(1));
5885 }
5886 
5887 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5888                                                                  unsigned VF) {
5889   assert(VF > 1 && "Scalarization cost of instruction implies vectorization.");
5890   Type *ValTy = getMemInstValueType(I);
5891   auto SE = PSE.getSE();
5892 
5893   unsigned AS = getLoadStoreAddressSpace(I);
5894   Value *Ptr = getLoadStorePointerOperand(I);
5895   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
5896 
  // Figure out whether the access is strided and get the stride value, if
  // it's known at compile time.
5899   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
5900 
5901   // Get the cost of the scalar memory instruction and address computation.
5902   unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
5903 
5904   // Don't pass *I here, since it is scalar but will actually be part of a
5905   // vectorized loop where the user of it is a vectorized instruction.
5906   const Align Alignment = getLoadStoreAlignment(I);
5907   Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
5908                                    Alignment, AS,
5909                                    TTI::TCK_RecipThroughput);
5910 
5911   // Get the overhead of the extractelement and insertelement instructions
5912   // we might create due to scalarization.
5913   Cost += getScalarizationOverhead(I, VF);
5914 
5915   // If we have a predicated store, it may not be executed for each vector
5916   // lane. Scale the cost by the probability of executing the predicated
5917   // block.
5918   if (isPredicatedInst(I)) {
5919     Cost /= getReciprocalPredBlockProb();
5920 
5921     if (useEmulatedMaskMemRefHack(I))
5922       // Artificially setting to a high enough value to practically disable
5923       // vectorization with such operations.
5924       Cost = 3000000;
5925   }
5926 
5927   return Cost;
5928 }
5929 
5930 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5931                                                              unsigned VF) {
5932   Type *ValTy = getMemInstValueType(I);
5933   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5934   Value *Ptr = getLoadStorePointerOperand(I);
5935   unsigned AS = getLoadStoreAddressSpace(I);
5936   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
5937   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
5938 
5939   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5940          "Stride should be 1 or -1 for consecutive memory access");
5941   const Align Alignment = getLoadStoreAlignment(I);
5942   unsigned Cost = 0;
5943   if (Legal->isMaskRequired(I))
5944     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5945                                       CostKind);
5946   else
5947     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
5948                                 CostKind, I);
5949 
5950   bool Reverse = ConsecutiveStride < 0;
5951   if (Reverse)
5952     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5953   return Cost;
5954 }
5955 
5956 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5957                                                          unsigned VF) {
5958   Type *ValTy = getMemInstValueType(I);
5959   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5960   const Align Alignment = getLoadStoreAlignment(I);
5961   unsigned AS = getLoadStoreAddressSpace(I);
5962   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
5963   if (isa<LoadInst>(I)) {
5964     return TTI.getAddressComputationCost(ValTy) +
5965            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
5966                                CostKind) +
5967            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
5968   }
5969   StoreInst *SI = cast<StoreInst>(I);
5970 
5971   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
5972   return TTI.getAddressComputationCost(ValTy) +
5973          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
5974                              CostKind) +
5975          (isLoopInvariantStoreValue
5976               ? 0
5977               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
5978                                        VF - 1));
5979 }
5980 
5981 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
5982                                                           unsigned VF) {
5983   Type *ValTy = getMemInstValueType(I);
5984   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5985   const Align Alignment = getLoadStoreAlignment(I);
5986   const Value *Ptr = getLoadStorePointerOperand(I);
5987 
5988   return TTI.getAddressComputationCost(VectorTy) +
5989          TTI.getGatherScatterOpCost(
5990              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
5991              TargetTransformInfo::TCK_RecipThroughput, I);
5992 }
5993 
5994 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
5995                                                             unsigned VF) {
5996   Type *ValTy = getMemInstValueType(I);
5997   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5998   unsigned AS = getLoadStoreAddressSpace(I);
5999 
6000   auto Group = getInterleavedAccessGroup(I);
6001   assert(Group && "Fail to get an interleaved access group.");
6002 
6003   unsigned InterleaveFactor = Group->getFactor();
6004   auto *WideVecTy = FixedVectorType::get(ValTy, VF * InterleaveFactor);
6005 
6006   // Holds the indices of existing members in an interleaved load group.
6007   // An interleaved store group doesn't need this as it doesn't allow gaps.
6008   SmallVector<unsigned, 4> Indices;
6009   if (isa<LoadInst>(I)) {
6010     for (unsigned i = 0; i < InterleaveFactor; i++)
6011       if (Group->getMember(i))
6012         Indices.push_back(i);
6013   }
6014 
6015   // Calculate the cost of the whole interleaved group.
6016   bool UseMaskForGaps =
6017       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
6018   unsigned Cost = TTI.getInterleavedMemoryOpCost(
6019       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6020       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6021 
6022   if (Group->isReverse()) {
6023     // TODO: Add support for reversed masked interleaved access.
6024     assert(!Legal->isMaskRequired(I) &&
6025            "Reverse masked interleaved access not supported.");
6026     Cost += Group->getNumMembers() *
6027             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
6028   }
6029   return Cost;
6030 }
6031 
6032 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
6033                                                               unsigned VF) {
6034   // Calculate scalar cost only. Vectorization cost should be ready at this
6035   // moment.
6036   if (VF == 1) {
6037     Type *ValTy = getMemInstValueType(I);
6038     const Align Alignment = getLoadStoreAlignment(I);
6039     unsigned AS = getLoadStoreAddressSpace(I);
6040 
6041     return TTI.getAddressComputationCost(ValTy) +
6042            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
6043                                TTI::TCK_RecipThroughput, I);
6044   }
6045   return getWideningCost(I, VF);
6046 }
6047 
6048 LoopVectorizationCostModel::VectorizationCostTy
6049 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
6050   // If we know that this instruction will remain uniform, check the cost of
6051   // the scalar version.
6052   if (isUniformAfterVectorization(I, VF))
6053     VF = 1;
6054 
6055   if (VF > 1 && isProfitableToScalarize(I, VF))
6056     return VectorizationCostTy(InstsToScalarize[VF][I], false);
6057 
6058   // Forced scalars do not have any scalarization overhead.
6059   auto ForcedScalar = ForcedScalars.find(VF);
6060   if (VF > 1 && ForcedScalar != ForcedScalars.end()) {
6061     auto InstSet = ForcedScalar->second;
6062     if (InstSet.count(I))
6063       return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false);
6064   }
6065 
6066   Type *VectorTy;
6067   unsigned C = getInstructionCost(I, VF, VectorTy);
6068 
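  // The type counts as "not scalarized" only if the widened type legalizes
  // into fewer than VF parts; needing VF or more parts is effectively
  // scalarization.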
6069   bool TypeNotScalarized =
6070       VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF;
6071   return VectorizationCostTy(C, TypeNotScalarized);
6072 }
6073 
6074 unsigned LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
6075                                                               unsigned VF) {
6076 
6077   if (VF == 1)
6078     return 0;
6079 
6080   unsigned Cost = 0;
6081   Type *RetTy = ToVectorTy(I->getType(), VF);
6082   if (!RetTy->isVoidTy() &&
6083       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
6084     Cost += TTI.getScalarizationOverhead(
6085         cast<VectorType>(RetTy), APInt::getAllOnesValue(VF), true, false);
6086 
6087   // Some targets keep addresses scalar.
6088   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
6089     return Cost;
6090 
6091   // Some targets support efficient element stores.
6092   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
6093     return Cost;
6094 
6095   // Collect operands to consider.
6096   CallInst *CI = dyn_cast<CallInst>(I);
6097   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
6098 
6099   // Skip operands that do not require extraction/scalarization and do not incur
6100   // any overhead.
6101   return Cost + TTI.getOperandsScalarizationOverhead(
6102                     filterExtractingOperands(Ops, VF), VF);
6103 }
6104 
6105 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
6106   if (VF == 1)
6107     return;
6108   NumPredStores = 0;
6109   for (BasicBlock *BB : TheLoop->blocks()) {
6110     // For each instruction in the old loop.
6111     for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
6113       if (!Ptr)
6114         continue;
6115 
6116       // TODO: We should generate better code and update the cost model for
6117       // predicated uniform stores. Today they are treated as any other
6118       // predicated store (see added test cases in
6119       // invariant-store-vectorization.ll).
6120       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
6121         NumPredStores++;
6122 
6123       if (Legal->isUniform(Ptr) &&
6124           // Conditional loads and stores should be scalarized and predicated.
6125           // isScalarWithPredication cannot be used here since masked
6126           // gather/scatters are not considered scalar with predication.
6127           !Legal->blockNeedsPredication(I.getParent())) {
6128         // TODO: Avoid replicating loads and stores instead of
6129         // relying on instcombine to remove them.
6130         // Load: Scalar load + broadcast
6131         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
6132         unsigned Cost = getUniformMemOpCost(&I, VF);
6133         setWideningDecision(&I, VF, CM_Scalarize, Cost);
6134         continue;
6135       }
6136 
6137       // We assume that widening is the best solution when possible.
6138       if (memoryInstructionCanBeWidened(&I, VF)) {
6139         unsigned Cost = getConsecutiveMemOpCost(&I, VF);
        int ConsecutiveStride =
            Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
6142         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6143                "Expected consecutive stride.");
6144         InstWidening Decision =
6145             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
6146         setWideningDecision(&I, VF, Decision, Cost);
6147         continue;
6148       }
6149 
6150       // Choose between Interleaving, Gather/Scatter or Scalarization.
6151       unsigned InterleaveCost = std::numeric_limits<unsigned>::max();
6152       unsigned NumAccesses = 1;
6153       if (isAccessInterleaved(&I)) {
6154         auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Failed to get an interleaved access group.");
6156 
6157         // Make one decision for the whole group.
6158         if (getWideningDecision(&I, VF) != CM_Unknown)
6159           continue;
6160 
6161         NumAccesses = Group->getNumMembers();
6162         if (interleavedAccessCanBeWidened(&I, VF))
6163           InterleaveCost = getInterleaveGroupCost(&I, VF);
6164       }
6165 
6166       unsigned GatherScatterCost =
6167           isLegalGatherOrScatter(&I)
6168               ? getGatherScatterCost(&I, VF) * NumAccesses
6169               : std::numeric_limits<unsigned>::max();
6170 
6171       unsigned ScalarizationCost =
6172           getMemInstScalarizationCost(&I, VF) * NumAccesses;
6173 
      // Choose the better solution for the current VF, record the decision,
      // and use it during vectorization.
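      // Ties between interleaving and gather/scatter favor interleaving,
      // while ties involving scalarization favor scalarization.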
6176       unsigned Cost;
6177       InstWidening Decision;
6178       if (InterleaveCost <= GatherScatterCost &&
6179           InterleaveCost < ScalarizationCost) {
6180         Decision = CM_Interleave;
6181         Cost = InterleaveCost;
6182       } else if (GatherScatterCost < ScalarizationCost) {
6183         Decision = CM_GatherScatter;
6184         Cost = GatherScatterCost;
6185       } else {
6186         Decision = CM_Scalarize;
6187         Cost = ScalarizationCost;
6188       }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group receives the cost, but
      // the cost will actually be assigned to one instruction.
6192       if (auto Group = getInterleavedAccessGroup(&I))
6193         setWideningDecision(Group, VF, Decision, Cost);
6194       else
6195         setWideningDecision(&I, VF, Decision, Cost);
6196     }
6197   }
6198 
6199   // Make sure that any load of address and any other address computation
6200   // remains scalar unless there is gather/scatter support. This avoids
6201   // inevitable extracts into address registers, and also has the benefit of
6202   // activating LSR more, since that pass can't optimize vectorized
6203   // addresses.
6204   if (TTI.prefersVectorizedAddressing())
6205     return;
6206 
6207   // Start with all scalar pointer uses.
6208   SmallPtrSet<Instruction *, 8> AddrDefs;
6209   for (BasicBlock *BB : TheLoop->blocks())
6210     for (Instruction &I : *BB) {
6211       Instruction *PtrDef =
6212         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
6213       if (PtrDef && TheLoop->contains(PtrDef) &&
6214           getWideningDecision(&I, VF) != CM_GatherScatter)
6215         AddrDefs.insert(PtrDef);
6216     }
6217 
6218   // Add all instructions used to generate the addresses.
6219   SmallVector<Instruction *, 4> Worklist;
6220   for (auto *I : AddrDefs)
6221     Worklist.push_back(I);
6222   while (!Worklist.empty()) {
6223     Instruction *I = Worklist.pop_back_val();
6224     for (auto &Op : I->operands())
6225       if (auto *InstOp = dyn_cast<Instruction>(Op))
6226         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
6227             AddrDefs.insert(InstOp).second)
6228           Worklist.push_back(InstOp);
6229   }
6230 
6231   for (auto *I : AddrDefs) {
6232     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since that requires knowing whether the
      // loaded register is involved in an address computation, the decision
      // is instead changed here, where we know this is the case.
6237       InstWidening Decision = getWideningDecision(I, VF);
6238       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
6239         // Scalarize a widened load of address.
6240         setWideningDecision(I, VF, CM_Scalarize,
6241                             (VF * getMemoryInstructionCost(I, 1)));
6242       else if (auto Group = getInterleavedAccessGroup(I)) {
6243         // Scalarize an interleave group of address loads.
6244         for (unsigned I = 0; I < Group->getFactor(); ++I) {
6245           if (Instruction *Member = Group->getMember(I))
6246             setWideningDecision(Member, VF, CM_Scalarize,
6247                                 (VF * getMemoryInstructionCost(Member, 1)));
6248         }
6249       }
6250     } else
6251       // Make sure I gets scalarized and a cost estimate without
6252       // scalarization overhead.
6253       ForcedScalars[VF].insert(I);
6254   }
6255 }
6256 
6257 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6258                                                         unsigned VF,
6259                                                         Type *&VectorTy) {
6260   Type *RetTy = I->getType();
6261   if (canTruncateToMinimalBitwidth(I, VF))
6262     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6263   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
6264   auto SE = PSE.getSE();
6265   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6266 
6267   // TODO: We need to estimate the cost of intrinsic calls.
6268   switch (I->getOpcode()) {
6269   case Instruction::GetElementPtr:
6270     // We mark this instruction as zero-cost because the cost of GEPs in
6271     // vectorized code depends on whether the corresponding memory instruction
6272     // is scalarized or not. Therefore, we handle GEPs with the memory
6273     // instruction cost.
6274     return 0;
6275   case Instruction::Br: {
6276     // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
6279     bool ScalarPredicatedBB = false;
6280     BranchInst *BI = cast<BranchInst>(I);
6281     if (VF > 1 && BI->isConditional() &&
6282         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
6283          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
6284       ScalarPredicatedBB = true;
6285 
6286     if (ScalarPredicatedBB) {
6287       // Return cost for branches around scalarized and predicated blocks.
6288       auto *Vec_i1Ty =
6289           FixedVectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
6290       return (TTI.getScalarizationOverhead(Vec_i1Ty, APInt::getAllOnesValue(VF),
6291                                            false, true) +
6292               (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF));
6293     } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1)
6294       // The back-edge branch will remain, as will all scalar branches.
6295       return TTI.getCFInstrCost(Instruction::Br, CostKind);
6296     else
6297       // This branch will be eliminated by if-conversion.
6298       return 0;
6299     // Note: We currently assume zero cost for an unconditional branch inside
6300     // a predicated block since it will become a fall-through, although we
6301     // may decide in the future to call TTI for all branches.
6302   }
6303   case Instruction::PHI: {
6304     auto *Phi = cast<PHINode>(I);
6305 
6306     // First-order recurrences are replaced by vector shuffles inside the loop.
6307     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
6308     if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
6309       return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
6310                                 cast<VectorType>(VectorTy), VF - 1,
6311                                 FixedVectorType::get(RetTy, 1));
6312 
6313     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
6314     // converted into select instructions. We require N - 1 selects per phi
6315     // node, where N is the number of incoming values.
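    // For example, a phi with three incoming values becomes two chained
    // selects in the vectorized loop.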
6316     if (VF > 1 && Phi->getParent() != TheLoop->getHeader())
6317       return (Phi->getNumIncomingValues() - 1) *
6318              TTI.getCmpSelInstrCost(
6319                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
6320                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
6321                  CostKind);
6322 
6323     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
6324   }
6325   case Instruction::UDiv:
6326   case Instruction::SDiv:
6327   case Instruction::URem:
6328   case Instruction::SRem:
6329     // If we have a predicated instruction, it may not be executed for each
6330     // vector lane. Get the scalarization cost and scale this amount by the
6331     // probability of executing the predicated block. If the instruction is not
6332     // predicated, we fall through to the next case.
6333     if (VF > 1 && isScalarWithPredication(I)) {
6334       unsigned Cost = 0;
6335 
6336       // These instructions have a non-void type, so account for the phi nodes
6337       // that we will create. This cost is likely to be zero. The phi node
6338       // cost, if any, should be scaled by the block probability because it
6339       // models a copy at the end of each predicated block.
6340       Cost += VF * TTI.getCFInstrCost(Instruction::PHI, CostKind);
6341 
6342       // The cost of the non-predicated instruction.
6343       Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
6344 
6345       // The cost of insertelement and extractelement instructions needed for
6346       // scalarization.
6347       Cost += getScalarizationOverhead(I, VF);
6348 
6349       // Scale the cost by the probability of executing the predicated blocks.
6350       // This assumes the predicated block for each vector lane is equally
6351       // likely.
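      // For example, if getReciprocalPredBlockProb() returns 2 (predicated
      // blocks assumed to execute half the time), the cost computed above is
      // halved.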
6352       return Cost / getReciprocalPredBlockProb();
6353     }
6354     LLVM_FALLTHROUGH;
6355   case Instruction::Add:
6356   case Instruction::FAdd:
6357   case Instruction::Sub:
6358   case Instruction::FSub:
6359   case Instruction::Mul:
6360   case Instruction::FMul:
6361   case Instruction::FDiv:
6362   case Instruction::FRem:
6363   case Instruction::Shl:
6364   case Instruction::LShr:
6365   case Instruction::AShr:
6366   case Instruction::And:
6367   case Instruction::Or:
6368   case Instruction::Xor: {
6369     // Since we will replace the stride by 1 the multiplication should go away.
6370     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
6371       return 0;
6372     // Certain instructions can be cheaper to vectorize if they have a constant
6373     // second vector operand. One example of this are shifts on x86.
6374     Value *Op2 = I->getOperand(1);
6375     TargetTransformInfo::OperandValueProperties Op2VP;
6376     TargetTransformInfo::OperandValueKind Op2VK =
6377         TTI.getOperandInfo(Op2, Op2VP);
6378     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
6379       Op2VK = TargetTransformInfo::OK_UniformValue;
6380 
6381     SmallVector<const Value *, 4> Operands(I->operand_values());
6382     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6383     return N * TTI.getArithmeticInstrCost(
6384                    I->getOpcode(), VectorTy, CostKind,
6385                    TargetTransformInfo::OK_AnyValue,
6386                    Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
6387   }
6388   case Instruction::FNeg: {
6389     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6390     return N * TTI.getArithmeticInstrCost(
6391                    I->getOpcode(), VectorTy, CostKind,
6392                    TargetTransformInfo::OK_AnyValue,
6393                    TargetTransformInfo::OK_AnyValue,
6394                    TargetTransformInfo::OP_None, TargetTransformInfo::OP_None,
6395                    I->getOperand(0), I);
6396   }
6397   case Instruction::Select: {
6398     SelectInst *SI = cast<SelectInst>(I);
6399     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6400     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6401     Type *CondTy = SI->getCondition()->getType();
6402     if (!ScalarCond)
6403       CondTy = FixedVectorType::get(CondTy, VF);
6404 
6405     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
6406                                   CostKind, I);
6407   }
6408   case Instruction::ICmp:
6409   case Instruction::FCmp: {
6410     Type *ValTy = I->getOperand(0)->getType();
6411     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6412     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6413       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
6414     VectorTy = ToVectorTy(ValTy, VF);
6415     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, CostKind,
6416                                   I);
6417   }
6418   case Instruction::Store:
6419   case Instruction::Load: {
6420     unsigned Width = VF;
6421     if (Width > 1) {
6422       InstWidening Decision = getWideningDecision(I, Width);
6423       assert(Decision != CM_Unknown &&
6424              "CM decision should be taken at this point");
6425       if (Decision == CM_Scalarize)
6426         Width = 1;
6427     }
6428     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
6429     return getMemoryInstructionCost(I, VF);
6430   }
6431   case Instruction::ZExt:
6432   case Instruction::SExt:
6433   case Instruction::FPToUI:
6434   case Instruction::FPToSI:
6435   case Instruction::FPExt:
6436   case Instruction::PtrToInt:
6437   case Instruction::IntToPtr:
6438   case Instruction::SIToFP:
6439   case Instruction::UIToFP:
6440   case Instruction::Trunc:
6441   case Instruction::FPTrunc:
6442   case Instruction::BitCast: {
6443     // We optimize the truncation of induction variables having constant
6444     // integer steps. The cost of these truncations is the same as the scalar
6445     // operation.
6446     if (isOptimizableIVTruncate(I, VF)) {
6447       auto *Trunc = cast<TruncInst>(I);
6448       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6449                                   Trunc->getSrcTy(), CostKind, Trunc);
6450     }
6451 
6452     Type *SrcScalarTy = I->getOperand(0)->getType();
6453     Type *SrcVecTy =
6454         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6455     if (canTruncateToMinimalBitwidth(I, VF)) {
6456       // This cast is going to be shrunk. This may remove the cast or it might
6457       // turn it into slightly different cast. For example, if MinBW == 16,
6458       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
6459       //
6460       // Calculate the modified src and dest types.
6461       Type *MinVecTy = VectorTy;
6462       if (I->getOpcode() == Instruction::Trunc) {
6463         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
6464         VectorTy =
6465             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6466       } else if (I->getOpcode() == Instruction::ZExt ||
6467                  I->getOpcode() == Instruction::SExt) {
6468         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
6469         VectorTy =
6470             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6471       }
6472     }
6473 
6474     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6475     return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy,
6476                                     CostKind, I);
6477   }
6478   case Instruction::Call: {
6479     bool NeedToScalarize;
6480     CallInst *CI = cast<CallInst>(I);
6481     unsigned CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
6482     if (getVectorIntrinsicIDForCall(CI, TLI))
6483       return std::min(CallCost, getVectorIntrinsicCost(CI, VF));
6484     return CallCost;
6485   }
6486   default:
6487     // The cost of executing VF copies of the scalar instruction. This opcode
6488     // is unknown. Assume that it is the same as 'mul'.
6489     return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy,
6490                                            CostKind) +
6491            getScalarizationOverhead(I, VF);
6492   } // end of switch.
6493 }
6494 
6495 char LoopVectorize::ID = 0;
6496 
6497 static const char lv_name[] = "Loop Vectorization";
6498 
6499 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
6500 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
6501 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
6502 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
6503 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
6504 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
6505 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
6506 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
6507 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
6508 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
6509 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
6510 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
6511 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
6512 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
6513 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
6514 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
6515 
6516 namespace llvm {
6517 
6518 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
6519 
6520 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
6521                               bool VectorizeOnlyWhenForced) {
6522   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
6523 }
6524 
6525 } // end namespace llvm
6526 
6527 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
6528   // Check if the pointer operand of a load or store instruction is
6529   // consecutive.
6530   if (auto *Ptr = getLoadStorePointerOperand(Inst))
6531     return Legal->isConsecutivePtr(Ptr);
6532   return false;
6533 }
6534 
6535 void LoopVectorizationCostModel::collectValuesToIgnore() {
6536   // Ignore ephemeral values.
6537   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
6538 
6539   // Ignore type-promoting instructions we identified during reduction
6540   // detection.
6541   for (auto &Reduction : Legal->getReductionVars()) {
6542     RecurrenceDescriptor &RedDes = Reduction.second;
6543     SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6544     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6545   }
6546   // Ignore type-casting instructions we identified during induction
6547   // detection.
6548   for (auto &Induction : Legal->getInductionVars()) {
6549     InductionDescriptor &IndDes = Induction.second;
6550     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6551     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6552   }
6553 }
6554 
6555 // TODO: we could return a pair of values that specify the max VF and
6556 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do this because VPlan does not yet have
// a cost model that can choose which plan to execute if
6559 // more than one is generated.
6560 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
6561                                  LoopVectorizationCostModel &CM) {
6562   unsigned WidestType;
6563   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
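  // For example, a 256-bit widest vector register and a 32-bit widest scalar
  // type yield a VF of 8.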
6564   return WidestVectorRegBits / WidestType;
6565 }
6566 
6567 VectorizationFactor
6568 LoopVectorizationPlanner::planInVPlanNativePath(unsigned UserVF) {
6569   unsigned VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before we can even evaluate whether vectorization is
  // profitable.
6572   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6573   // the vectorization pipeline.
6574   if (!OrigLoop->empty()) {
6575     // If the user doesn't provide a vectorization factor, determine a
6576     // reasonable one.
6577     if (!UserVF) {
6578       VF = determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM);
6579       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
6580 
6581       // Make sure we have a VF > 1 for stress testing.
6582       if (VPlanBuildStressTest && VF < 2) {
6583         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
6584                           << "overriding computed VF.\n");
6585         VF = 4;
6586       }
6587     }
6588     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6589     assert(isPowerOf2_32(VF) && "VF needs to be a power of two");
6590     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVF ? "user " : "") << "VF " << VF
6591                       << " to build VPlans.\n");
6592     buildVPlans(VF, VF);
6593 
6594     // For VPlan build stress testing, we bail out after VPlan construction.
6595     if (VPlanBuildStressTest)
6596       return VectorizationFactor::Disabled();
6597 
6598     return {VF, 0};
6599   }
6600 
6601   LLVM_DEBUG(
6602       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6603                 "VPlan-native path.\n");
6604   return VectorizationFactor::Disabled();
6605 }
6606 
6607 Optional<VectorizationFactor> LoopVectorizationPlanner::plan(unsigned UserVF,
6608                                                              unsigned UserIC) {
6609   assert(OrigLoop->empty() && "Inner loop expected.");
6610   Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC);
  if (!MaybeMaxVF) // Cases that should not be vectorized or interleaved.
6612     return None;
6613 
6614   // Invalidate interleave groups if all blocks of loop will be predicated.
6615   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
6616       !useMaskedInterleavedAccesses(*TTI)) {
6617     LLVM_DEBUG(
6618         dbgs()
6619         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6620            "which requires masked-interleaved support.\n");
6621     if (CM.InterleaveInfo.invalidateGroups())
6622       // Invalidating interleave groups also requires invalidating all decisions
6623       // based on them, which includes widening decisions and uniform and scalar
6624       // values.
6625       CM.invalidateCostModelingDecisions();
6626   }
6627 
6628   if (UserVF) {
6629     LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6630     assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
6631     // Collect the instructions (and their associated costs) that will be more
6632     // profitable to scalarize.
6633     CM.selectUserVectorizationFactor(UserVF);
6634     buildVPlansWithVPRecipes(UserVF, UserVF);
6635     LLVM_DEBUG(printPlans(dbgs()));
6636     return {{UserVF, 0}};
6637   }
6638 
6639   unsigned MaxVF = MaybeMaxVF.getValue();
6640   assert(MaxVF != 0 && "MaxVF is zero.");
6641 
6642   for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
6643     // Collect Uniform and Scalar instructions after vectorization with VF.
6644     CM.collectUniformsAndScalars(VF);
6645 
6646     // Collect the instructions (and their associated costs) that will be more
6647     // profitable to scalarize.
6648     if (VF > 1)
6649       CM.collectInstsToScalarize(VF);
6650   }
6651 
6652   buildVPlansWithVPRecipes(1, MaxVF);
6653   LLVM_DEBUG(printPlans(dbgs()));
6654   if (MaxVF == 1)
6655     return VectorizationFactor::Disabled();
6656 
6657   // Select the optimal vectorization factor.
6658   return CM.selectVectorizationFactor(MaxVF);
6659 }
6660 
6661 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
6662   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
6663                     << '\n');
6664   BestVF = VF;
6665   BestUF = UF;
6666 
6667   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
6668     return !Plan->hasVF(VF);
6669   });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
6671 }
6672 
6673 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
6674                                            DominatorTree *DT) {
6675   // Perform the actual loop transformation.
6676 
6677   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
6678   VPCallbackILV CallbackILV(ILV);
6679 
6680   VPTransformState State{BestVF, BestUF,      LI,
6681                          DT,     ILV.Builder, ILV.VectorLoopValueMap,
6682                          &ILV,   CallbackILV};
6683   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
6684   State.TripCount = ILV.getOrCreateTripCount(nullptr);
6685   State.CanonicalIV = ILV.Induction;
6686 
6687   //===------------------------------------------------===//
6688   //
  // Notice: any optimization or new instruction that goes
6690   // into the code below should also be implemented in
6691   // the cost-model.
6692   //
6693   //===------------------------------------------------===//
6694 
6695   // 2. Copy and widen instructions from the old loop into the new loop.
6696   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
6697   VPlans.front()->execute(&State);
6698 
6699   // 3. Fix the vectorized code: take care of header phi's, live-outs,
6700   //    predication, updating analyses.
6701   ILV.fixVectorizedLoop();
6702 }
6703 
6704 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
6705     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
6706   BasicBlock *Latch = OrigLoop->getLoopLatch();
6707 
6708   // We create new control-flow for the vectorized loop, so the original
6709   // condition will be dead after vectorization if it's only used by the
6710   // branch.
6711   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
6712   if (Cmp && Cmp->hasOneUse())
6713     DeadInstructions.insert(Cmp);
6714 
6715   // We create new "steps" for induction variable updates to which the original
6716   // induction variables map. An original update instruction will be dead if
6717   // all its users except the induction variable are dead.
6718   for (auto &Induction : Legal->getInductionVars()) {
6719     PHINode *Ind = Induction.first;
6720     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
6721     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
6722           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
6723         }))
6724       DeadInstructions.insert(IndUpdate);
6725 
6726     // We record as "Dead" also the type-casting instructions we had identified
6727     // during induction analysis. We don't need any handling for them in the
6728     // vectorized loop because we have proven that, under a proper runtime
6729     // test guarding the vectorized loop, the value of the phi, and the casted
6730     // value of the phi, are the same. The last instruction in this casting chain
6731     // will get its scalar/vector/widened def from the scalar/vector/widened def
6732     // of the respective phi node. Any other casts in the induction def-use chain
6733     // have no other uses outside the phi update chain, and will be ignored.
6734     InductionDescriptor &IndDes = Induction.second;
6735     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6736     DeadInstructions.insert(Casts.begin(), Casts.end());
6737   }
6738 }
6739 
6740 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
6741 
6742 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
6743 
6744 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
6745                                         Instruction::BinaryOps BinOp) {
6746   // When unrolling and the VF is 1, we only need to add a simple scalar.
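  // For example, with StartIdx = 2 and Step = 1, this simply produces Val + 2.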
6747   Type *Ty = Val->getType();
6748   assert(!Ty->isVectorTy() && "Val must be a scalar");
6749 
6750   if (Ty->isFloatingPointTy()) {
6751     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
6752 
6753     // Floating point operations had to be 'fast' to enable the unrolling.
6754     Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
6755     return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
6756   }
6757   Constant *C = ConstantInt::get(Ty, StartIdx);
6758   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
6759 }
6760 
6761 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
6762   SmallVector<Metadata *, 4> MDs;
6763   // Reserve first location for self reference to the LoopID metadata node.
6764   MDs.push_back(nullptr);
6765   bool IsUnrollMetadata = false;
6766   MDNode *LoopID = L->getLoopID();
6767   if (LoopID) {
6768     // First find existing loop unrolling disable metadata.
6769     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
6770       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
6771       if (MD) {
6772         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
6773         IsUnrollMetadata =
6774             S && S->getString().startswith("llvm.loop.unroll.disable");
6775       }
6776       MDs.push_back(LoopID->getOperand(i));
6777     }
6778   }
6779 
6780   if (!IsUnrollMetadata) {
6781     // Add runtime unroll disable metadata.
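    // The resulting loop ID is expected to look roughly like:
    //   !0 = distinct !{!0, <existing operands...>, !1}
    //   !1 = !{!"llvm.loop.unroll.runtime.disable"}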
6782     LLVMContext &Context = L->getHeader()->getContext();
6783     SmallVector<Metadata *, 1> DisableOperands;
6784     DisableOperands.push_back(
6785         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
6786     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
6787     MDs.push_back(DisableNode);
6788     MDNode *NewLoopID = MDNode::get(Context, MDs);
6789     // Set operand 0 to refer to the loop id itself.
6790     NewLoopID->replaceOperandWith(0, NewLoopID);
6791     L->setLoopID(NewLoopID);
6792   }
6793 }
6794 
6795 bool LoopVectorizationPlanner::getDecisionAndClampRange(
6796     const std::function<bool(unsigned)> &Predicate, VFRange &Range) {
6797   assert(Range.End > Range.Start && "Trying to test an empty VF range.");
6798   bool PredicateAtRangeStart = Predicate(Range.Start);
6799 
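  // For example, given Range = [2, 16) where the predicate first changes its
  // value at VF = 8, the range is clamped to [2, 8) and the predicate's value
  // at VF = 2 is returned.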
6800   for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2)
6801     if (Predicate(TmpVF) != PredicateAtRangeStart) {
6802       Range.End = TmpVF;
6803       break;
6804     }
6805 
6806   return PredicateAtRangeStart;
6807 }
6808 
6809 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
6810 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
6811 /// of VF's starting at a given VF and extending it as much as possible. Each
6812 /// vectorization decision can potentially shorten this sub-range during
6813 /// buildVPlan().
6814 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) {
6815   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
6816     VFRange SubRange = {VF, MaxVF + 1};
6817     VPlans.push_back(buildVPlan(SubRange));
6818     VF = SubRange.End;
6819   }
6820 }
6821 
6822 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
6823                                          VPlanPtr &Plan) {
6824   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
6825 
6826   // Look for cached value.
6827   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
6828   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
6829   if (ECEntryIt != EdgeMaskCache.end())
6830     return ECEntryIt->second;
6831 
6832   VPValue *SrcMask = createBlockInMask(Src, Plan);
6833 
6834   // The terminator has to be a branch inst!
6835   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
6836   assert(BI && "Unexpected terminator found");
6837 
6838   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
6839     return EdgeMaskCache[Edge] = SrcMask;
6840 
6841   VPValue *EdgeMask = Plan->getVPValue(BI->getCondition());
6842   assert(EdgeMask && "No Edge Mask found for condition");
6843 
6844   if (BI->getSuccessor(0) != Dst)
6845     EdgeMask = Builder.createNot(EdgeMask);
6846 
6847   if (SrcMask) // Otherwise block in-mask is all-one, no need to AND.
6848     EdgeMask = Builder.createAnd(EdgeMask, SrcMask);
6849 
6850   return EdgeMaskCache[Edge] = EdgeMask;
6851 }
6852 
6853 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
6854   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
6855 
6856   // Look for cached value.
6857   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
6858   if (BCEntryIt != BlockMaskCache.end())
6859     return BCEntryIt->second;
6860 
6861   // All-one mask is modelled as no-mask following the convention for masked
6862   // load/store/gather/scatter. Initialize BlockMask to no-mask.
6863   VPValue *BlockMask = nullptr;
6864 
6865   if (OrigLoop->getHeader() == BB) {
6866     if (!CM.blockNeedsPredication(BB))
6867       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
6868 
6869     // Introduce the early-exit compare IV <= BTC to form header block mask.
6870     // This is used instead of IV < TC because TC may wrap, unlike BTC.
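    // For example, with a trip count of 11 (BTC = 10) and VF = 4, the last
    // vector iteration compares lane IVs {8, 9, 10, 11} against 10, masking
    // off only the final lane.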
6871     // Start by constructing the desired canonical IV.
6872     VPValue *IV = nullptr;
6873     if (Legal->getPrimaryInduction())
6874       IV = Plan->getVPValue(Legal->getPrimaryInduction());
6875     else {
6876       auto IVRecipe = new VPWidenCanonicalIVRecipe();
6877       Builder.getInsertBlock()->appendRecipe(IVRecipe);
6878       IV = IVRecipe->getVPValue();
6879     }
6880     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
6881     bool TailFolded = !CM.isScalarEpilogueAllowed();
6882     if (TailFolded && CM.TTI.emitGetActiveLaneMask())
6883       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, BTC});
6884     else
6885       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
6886     return BlockMaskCache[BB] = BlockMask;
6887   }
6888 
6889   // This is the block mask. We OR all incoming edges.
6890   for (auto *Predecessor : predecessors(BB)) {
6891     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
6892     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
6893       return BlockMaskCache[BB] = EdgeMask;
6894 
6895     if (!BlockMask) { // BlockMask has its initialized nullptr value.
6896       BlockMask = EdgeMask;
6897       continue;
6898     }
6899 
6900     BlockMask = Builder.createOr(BlockMask, EdgeMask);
6901   }
6902 
6903   return BlockMaskCache[BB] = BlockMask;
6904 }
6905 
6906 VPWidenMemoryInstructionRecipe *
6907 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range,
6908                                   VPlanPtr &Plan) {
6909   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
6910          "Must be called with either a load or store");
6911 
6912   auto willWiden = [&](unsigned VF) -> bool {
6913     if (VF == 1)
6914       return false;
6915     LoopVectorizationCostModel::InstWidening Decision =
6916         CM.getWideningDecision(I, VF);
6917     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
6918            "CM decision should be taken at this point.");
6919     if (Decision == LoopVectorizationCostModel::CM_Interleave)
6920       return true;
6921     if (CM.isScalarAfterVectorization(I, VF) ||
6922         CM.isProfitableToScalarize(I, VF))
6923       return false;
6924     return Decision != LoopVectorizationCostModel::CM_Scalarize;
6925   };
6926 
6927   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
6928     return nullptr;
6929 
6930   VPValue *Mask = nullptr;
6931   if (Legal->isMaskRequired(I))
6932     Mask = createBlockInMask(I->getParent(), Plan);
6933 
6934   VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I));
6935   if (LoadInst *Load = dyn_cast<LoadInst>(I))
6936     return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask);
6937 
6938   StoreInst *Store = cast<StoreInst>(I);
6939   VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand());
6940   return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask);
6941 }
6942 
6943 VPWidenIntOrFpInductionRecipe *
6944 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi) const {
6945   // Check if this is an integer or fp induction. If so, build the recipe that
6946   // produces its scalar and vector values.
6947   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
6948   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
6949       II.getKind() == InductionDescriptor::IK_FpInduction)
6950     return new VPWidenIntOrFpInductionRecipe(Phi);
6951 
6952   return nullptr;
6953 }
6954 
6955 VPWidenIntOrFpInductionRecipe *
6956 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I,
6957                                                 VFRange &Range) const {
6958   // Optimize the special case where the source is a constant integer
6959   // induction variable. Notice that we can only optimize the 'trunc' case
6960   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
6961   // (c) other casts depend on pointer size.
6962 
6963   // Determine whether \p K is a truncation based on an induction variable that
6964   // can be optimized.
6965   auto isOptimizableIVTruncate =
6966       [&](Instruction *K) -> std::function<bool(unsigned)> {
6967     return
6968         [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); };
6969   };
6970 
6971   if (LoopVectorizationPlanner::getDecisionAndClampRange(
6972           isOptimizableIVTruncate(I), Range))
6973     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
6974                                              I);
6975   return nullptr;
6976 }
6977 
6978 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) {
6979   // We know that all PHIs in non-header blocks are converted into selects, so
6980   // we don't have to worry about the insertion order and we can just use the
6981   // builder. At this point we generate the predication tree. There may be
6982   // duplications since this is a simple recursive scan, but future
6983   // optimizations will clean it up.
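  // The resulting recipe's operands are laid out as (incoming value, edge
  // mask) pairs, with the mask omitted for an edge whose mask is all-one.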
6984 
6985   SmallVector<VPValue *, 2> Operands;
6986   unsigned NumIncoming = Phi->getNumIncomingValues();
6987   for (unsigned In = 0; In < NumIncoming; In++) {
6988     VPValue *EdgeMask =
6989       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
6990     assert((EdgeMask || NumIncoming == 1) &&
6991            "Multiple predecessors with one having a full mask");
6992     Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In)));
6993     if (EdgeMask)
6994       Operands.push_back(EdgeMask);
6995   }
6996   return new VPBlendRecipe(Phi, Operands);
6997 }
6998 
6999 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range,
7000                                                    VPlan &Plan) const {
7001 
7002   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
7003       [this, CI](unsigned VF) { return CM.isScalarWithPredication(CI, VF); },
7004       Range);
7005 
7006   if (IsPredicated)
7007     return nullptr;
7008 
7009   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
7010   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
7011              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect))
7012     return nullptr;
7013 
7014   auto willWiden = [&](unsigned VF) -> bool {
7015     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
7016     // The following case may be scalarized depending on the VF.
    // The flag shows whether we use an intrinsic or a regular call for the
    // vectorized version of the instruction.
7019     // Is it beneficial to perform intrinsic call compared to lib call?
7020     bool NeedToScalarize = false;
7021     unsigned CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
7022     bool UseVectorIntrinsic =
7023         ID && CM.getVectorIntrinsicCost(CI, VF) <= CallCost;
7024     return UseVectorIntrinsic || !NeedToScalarize;
7025   };
7026 
7027   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
7028     return nullptr;
7029 
7030   return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands()));
7031 }
7032 
7033 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
7034   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
7035          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
  // The instruction should be widened, unless it is scalar after
  // vectorization, scalarization is profitable, or it is predicated.
7038   auto WillScalarize = [this, I](unsigned VF) -> bool {
7039     return CM.isScalarAfterVectorization(I, VF) ||
7040            CM.isProfitableToScalarize(I, VF) ||
7041            CM.isScalarWithPredication(I, VF);
7042   };
7043   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
7044                                                              Range);
7045 }
7046 
7047 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const {
7048   auto IsVectorizableOpcode = [](unsigned Opcode) {
7049     switch (Opcode) {
7050     case Instruction::Add:
7051     case Instruction::And:
7052     case Instruction::AShr:
7053     case Instruction::BitCast:
7054     case Instruction::FAdd:
7055     case Instruction::FCmp:
7056     case Instruction::FDiv:
7057     case Instruction::FMul:
7058     case Instruction::FNeg:
7059     case Instruction::FPExt:
7060     case Instruction::FPToSI:
7061     case Instruction::FPToUI:
7062     case Instruction::FPTrunc:
7063     case Instruction::FRem:
7064     case Instruction::FSub:
7065     case Instruction::ICmp:
7066     case Instruction::IntToPtr:
7067     case Instruction::LShr:
7068     case Instruction::Mul:
7069     case Instruction::Or:
7070     case Instruction::PtrToInt:
7071     case Instruction::SDiv:
7072     case Instruction::Select:
7073     case Instruction::SExt:
7074     case Instruction::Shl:
7075     case Instruction::SIToFP:
7076     case Instruction::SRem:
7077     case Instruction::Sub:
7078     case Instruction::Trunc:
7079     case Instruction::UDiv:
7080     case Instruction::UIToFP:
7081     case Instruction::URem:
7082     case Instruction::Xor:
7083     case Instruction::ZExt:
7084       return true;
7085     }
7086     return false;
7087   };
7088 
7089   if (!IsVectorizableOpcode(I->getOpcode()))
7090     return nullptr;
7091 
7092   // Success: widen this instruction.
7093   return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands()));
7094 }
7095 
7096 VPBasicBlock *VPRecipeBuilder::handleReplication(
7097     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
7098     DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
7099     VPlanPtr &Plan) {
7100   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
7101       [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); },
7102       Range);
7103 
7104   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
7105       [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range);
7106 
7107   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
7108                                        IsUniform, IsPredicated);
7109   setRecipe(I, Recipe);
7110 
7111   // Find if I uses a predicated instruction. If so, it will use its scalar
7112   // value. Avoid hoisting the insert-element which packs the scalar value into
7113   // a vector value, as that happens iff all users use the vector value.
7114   for (auto &Op : I->operands())
7115     if (auto *PredInst = dyn_cast<Instruction>(Op))
7116       if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
7117         PredInst2Recipe[PredInst]->setAlsoPack(false);
7118 
7119   // Finalize the recipe for Instr, first if it is not predicated.
7120   if (!IsPredicated) {
7121     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
7122     VPBB->appendRecipe(Recipe);
7123     return VPBB;
7124   }
7125   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
7126   assert(VPBB->getSuccessors().empty() &&
7127          "VPBB has successors when handling predicated replication.");
7128   // Record predicated instructions for above packing optimizations.
7129   PredInst2Recipe[I] = Recipe;
7130   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
7131   VPBlockUtils::insertBlockAfter(Region, VPBB);
7132   auto *RegSucc = new VPBasicBlock();
7133   VPBlockUtils::insertBlockAfter(RegSucc, Region);
7134   return RegSucc;
7135 }
7136 
7137 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
7138                                                       VPRecipeBase *PredRecipe,
7139                                                       VPlanPtr &Plan) {
7140   // Instructions marked for predication are replicated and placed under an
7141   // if-then construct to prevent side-effects.
7142 
7143   // Generate recipes to compute the block mask for this region.
7144   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
7145 
7146   // Build the triangular if-then region.
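  // The region has the shape Entry -> { Pred, Exit } with Pred -> Exit, i.e. a
  // triangle that guards the predicated recipe.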
7147   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
7148   assert(Instr->getParent() && "Predicated instruction not in any basic block");
7149   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
7150   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
7151   auto *PHIRecipe =
7152       Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
7153   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
7154   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
7155   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
7156 
7157   // Note: first set Entry as region entry and then connect successors starting
7158   // from it in order, to propagate the "parent" of each VPBasicBlock.
7159   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
7160   VPBlockUtils::connectBlocks(Pred, Exit);
7161 
7162   return Region;
7163 }
7164 
7165 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
7166                                                       VFRange &Range,
7167                                                       VPlanPtr &Plan) {
7168   // First, check for specific widening recipes that deal with calls, memory
7169   // operations, inductions and Phi nodes.
7170   if (auto *CI = dyn_cast<CallInst>(Instr))
7171     return tryToWidenCall(CI, Range, *Plan);
7172 
7173   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
7174     return tryToWidenMemory(Instr, Range, Plan);
7175 
7176   VPRecipeBase *Recipe;
7177   if (auto Phi = dyn_cast<PHINode>(Instr)) {
7178     if (Phi->getParent() != OrigLoop->getHeader())
7179       return tryToBlend(Phi, Plan);
7180     if ((Recipe = tryToOptimizeInductionPHI(Phi)))
7181       return Recipe;
7182     return new VPWidenPHIRecipe(Phi);
7183   }
7184 
7185   if (isa<TruncInst>(Instr) &&
7186       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Range)))
7187     return Recipe;
7188 
7189   if (!shouldWiden(Instr, Range))
7190     return nullptr;
7191 
7192   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
7193     return new VPWidenGEPRecipe(GEP, Plan->mapToVPValues(GEP->operands()),
7194                                 OrigLoop);
7195 
7196   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
7197     bool InvariantCond =
7198         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
7199     return new VPWidenSelectRecipe(*SI, Plan->mapToVPValues(SI->operands()),
7200                                    InvariantCond);
7201   }
7202 
7203   return tryToWiden(Instr, *Plan);
7204 }
7205 
7206 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF,
7207                                                         unsigned MaxVF) {
7208   assert(OrigLoop->empty() && "Inner loop expected.");
7209 
7210   // Collect conditions feeding internal conditional branches; they need to be
7211   // represented in VPlan for it to model masking.
7212   SmallPtrSet<Value *, 1> NeedDef;
7213 
7214   auto *Latch = OrigLoop->getLoopLatch();
7215   for (BasicBlock *BB : OrigLoop->blocks()) {
7216     if (BB == Latch)
7217       continue;
7218     BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
7219     if (Branch && Branch->isConditional())
7220       NeedDef.insert(Branch->getCondition());
7221   }
7222 
  // If the tail is to be folded by masking, the primary induction variable, if
  // it exists, needs to be represented in VPlan so it can model early-exit
  // masking.
7225   // Also, both the Phi and the live-out instruction of each reduction are
7226   // required in order to introduce a select between them in VPlan.
7227   if (CM.foldTailByMasking()) {
7228     if (Legal->getPrimaryInduction())
7229       NeedDef.insert(Legal->getPrimaryInduction());
7230     for (auto &Reduction : Legal->getReductionVars()) {
7231       NeedDef.insert(Reduction.first);
7232       NeedDef.insert(Reduction.second.getLoopExitInstr());
7233     }
7234   }
7235 
7236   // Collect instructions from the original loop that will become trivially dead
7237   // in the vectorized loop. We don't need to vectorize these instructions. For
7238   // example, original induction update instructions can become dead because we
7239   // separately emit induction "steps" when generating code for the new loop.
7240   // Similarly, we create a new latch condition when setting up the structure
7241   // of the new loop, so the old one can become dead.
7242   SmallPtrSet<Instruction *, 4> DeadInstructions;
7243   collectTriviallyDeadInstructions(DeadInstructions);
7244 
7245   // Add assume instructions we need to drop to DeadInstructions, to prevent
7246   // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
7248   // control flow is preserved, we should keep them.
7249   auto &ConditionalAssumes = Legal->getConditionalAssumes();
7250   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
7251 
7252   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
7253   // Dead instructions do not need sinking. Remove them from SinkAfter.
7254   for (Instruction *I : DeadInstructions)
7255     SinkAfter.erase(I);
7256 
7257   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
7258     VFRange SubRange = {VF, MaxVF + 1};
7259     VPlans.push_back(buildVPlanWithVPRecipes(SubRange, NeedDef,
7260                                              DeadInstructions, SinkAfter));
7261     VF = SubRange.End;
7262   }
7263 }
7264 
7265 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
7266     VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef,
7267     SmallPtrSetImpl<Instruction *> &DeadInstructions,
7268     const DenseMap<Instruction *, Instruction *> &SinkAfter) {
7269 
7270   // Hold a mapping from predicated instructions to their recipes, in order to
7271   // fix their AlsoPack behavior if a user is determined to replicate and use a
  // scalar instead of a vector value.
7273   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
7274 
7275   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
7276 
7277   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
7278 
7279   // ---------------------------------------------------------------------------
7280   // Pre-construction: record ingredients whose recipes we'll need to further
7281   // process after constructing the initial VPlan.
7282   // ---------------------------------------------------------------------------
7283 
7284   // Mark instructions we'll need to sink later and their targets as
7285   // ingredients whose recipe we'll need to record.
7286   for (auto &Entry : SinkAfter) {
7287     RecipeBuilder.recordRecipeOf(Entry.first);
7288     RecipeBuilder.recordRecipeOf(Entry.second);
7289   }
7290 
7291   // For each interleave group which is relevant for this (possibly trimmed)
7292   // Range, add it to the set of groups to be later applied to the VPlan and add
7293   // placeholders for its members' Recipes which we'll be replacing with a
7294   // single VPInterleaveRecipe.
7295   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
7296     auto applyIG = [IG, this](unsigned VF) -> bool {
7297       return (VF >= 2 && // Query is illegal for VF == 1
7298               CM.getWideningDecision(IG->getInsertPos(), VF) ==
7299                   LoopVectorizationCostModel::CM_Interleave);
7300     };
7301     if (!getDecisionAndClampRange(applyIG, Range))
7302       continue;
7303     InterleaveGroups.insert(IG);
7304     for (unsigned i = 0; i < IG->getFactor(); i++)
7305       if (Instruction *Member = IG->getMember(i))
7306         RecipeBuilder.recordRecipeOf(Member);
7307   };
7308 
7309   // ---------------------------------------------------------------------------
7310   // Build initial VPlan: Scan the body of the loop in a topological order to
7311   // visit each basic block after having visited its predecessor basic blocks.
7312   // ---------------------------------------------------------------------------
7313 
7314   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
7315   auto Plan = std::make_unique<VPlan>();
7316   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
7317   Plan->setEntry(VPBB);
7318 
7319   // Represent values that will have defs inside VPlan.
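  // These include, e.g., the reduction loop-exit instructions recorded by
  // the caller, which the tail-folding code below looks up via
  // Plan->getVPValue().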
7320   for (Value *V : NeedDef)
7321     Plan->addVPValue(V);
7322 
7323   // Scan the body of the loop in a topological order to visit each basic block
7324   // after having visited its predecessor basic blocks.
7325   LoopBlocksDFS DFS(OrigLoop);
7326   DFS.perform(LI);
7327 
7328   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
7329     // Relevant instructions from basic block BB will be grouped into VPRecipe
7330     // ingredients and fill a new VPBasicBlock.
7331     unsigned VPBBsForBB = 0;
7332     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
7333     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
7334     VPBB = FirstVPBBForBB;
7335     Builder.setInsertPoint(VPBB);
7336 
7337     // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
7339     for (Instruction &I : BB->instructionsWithoutDebug()) {
7340       Instruction *Instr = &I;
7341 
7342       // First filter out irrelevant instructions, to ensure no recipes are
7343       // built for them.
7344       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
7345         continue;
7346 
7347       if (auto Recipe =
7348               RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
7349         RecipeBuilder.setRecipe(Instr, Recipe);
7350         VPBB->appendRecipe(Recipe);
7351         continue;
7352       }
7353 
      // Otherwise, if all widening options failed, the instruction is to be
      // replicated. This may create a successor for VPBB.
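      // When the instruction needs predication, handleReplication typically
      // wraps its recipe in a VPRegionBlock guarded by the block-in mask and
      // returns a fresh VPBasicBlock continuing after that region; otherwise
      // it appends the recipe to VPBB and returns VPBB itself.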
7356       VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
7357           Instr, Range, VPBB, PredInst2Recipe, Plan);
7358       if (NextVPBB != VPBB) {
7359         VPBB = NextVPBB;
7360         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
7361                                     : "");
7362       }
7363     }
7364   }
7365 
  // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
  // may also be empty, such as the last one (VPBB), reflecting original
  // basic blocks with no recipes.
7369   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
7370   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
7371   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
7372   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
7373   delete PreEntry;
7374 
7375   // ---------------------------------------------------------------------------
7376   // Transform initial VPlan: Apply previously taken decisions, in order, to
7377   // bring the VPlan to its final state.
7378   // ---------------------------------------------------------------------------
7379 
7380   // Apply Sink-After legal constraints.
7381   for (auto &Entry : SinkAfter) {
7382     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
7383     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
7384     Sink->moveAfter(Target);
7385   }
7386 
7387   // Interleave memory: for each Interleave Group we marked earlier as relevant
7388   // for this VPlan, replace the Recipes widening its memory instructions with a
7389   // single VPInterleaveRecipe at its insertion point.
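  // For example, a factor-2 group of loads {A[2*i], A[2*i+1]} gets a single
  // VPInterleaveRecipe inserted where its insert-pos recipe was, and the two
  // individual widened-load recipes of its members are then erased.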
7390   for (auto IG : InterleaveGroups) {
7391     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
7392         RecipeBuilder.getRecipe(IG->getInsertPos()));
7393     (new VPInterleaveRecipe(IG, Recipe->getAddr(), Recipe->getMask()))
7394         ->insertBefore(Recipe);
7395 
7396     for (unsigned i = 0; i < IG->getFactor(); ++i)
7397       if (Instruction *Member = IG->getMember(i)) {
7398         RecipeBuilder.getRecipe(Member)->eraseFromParent();
7399       }
7400   }
7401 
7402   // Finally, if tail is folded by masking, introduce selects between the phi
7403   // and the live-out instruction of each reduction, at the end of the latch.
7404   if (CM.foldTailByMasking()) {
7405     Builder.setInsertPoint(VPBB);
7406     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
7407     for (auto &Reduction : Legal->getReductionVars()) {
7408       VPValue *Phi = Plan->getVPValue(Reduction.first);
7409       VPValue *Red = Plan->getVPValue(Reduction.second.getLoopExitInstr());
7410       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
7411     }
7412   }
7413 
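  // E.g., a plan built for Range {4, 16} is named
  // "Initial VPlan for VF={4,8},UF>=1".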
7414   std::string PlanName;
7415   raw_string_ostream RSO(PlanName);
7416   unsigned VF = Range.Start;
7417   Plan->addVF(VF);
7418   RSO << "Initial VPlan for VF={" << VF;
7419   for (VF *= 2; VF < Range.End; VF *= 2) {
7420     Plan->addVF(VF);
7421     RSO << "," << VF;
7422   }
7423   RSO << "},UF>=1";
7424   RSO.flush();
7425   Plan->setName(PlanName);
7426 
7427   return Plan;
7428 }
7429 
7430 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
7431   // Outer loop handling: They may require CFG and instruction level
7432   // transformations before even evaluating whether vectorization is profitable.
7433   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7434   // the vectorization pipeline.
7435   assert(!OrigLoop->empty());
7436   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7437 
7438   // Create new empty VPlan
7439   auto Plan = std::make_unique<VPlan>();
7440 
7441   // Build hierarchical CFG
7442   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
7443   HCFGBuilder.buildHierarchicalCFG();
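  // At this point the H-CFG is essentially a one-to-one translation of the
  // input IR: VPBasicBlocks holding VPInstructions, wrapped in a top-level
  // region.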
7444 
7445   for (unsigned VF = Range.Start; VF < Range.End; VF *= 2)
7446     Plan->addVF(VF);
7447 
7448   if (EnableVPlanPredication) {
7449     VPlanPredicator VPP(*Plan);
7450     VPP.predicate();
7451 
7452     // Avoid running transformation to recipes until masked code generation in
7453     // VPlan-native path is in place.
7454     return Plan;
7455   }
7456 
7457   SmallPtrSet<Instruction *, 1> DeadInstructions;
7458   VPlanTransforms::VPInstructionsToVPRecipes(
7459       OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
7460   return Plan;
7461 }
7462 
Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateVectorValues(
    Value *V, unsigned Part) {
  return ILV.getOrCreateVectorValue(V, Part);
}
7467 
7468 Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue(
7469     Value *V, const VPIteration &Instance) {
7470   return ILV.getOrCreateScalarValue(V, Instance);
7471 }
7472 
7473 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
7474                                VPSlotTracker &SlotTracker) const {
7475   O << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
7476   IG->getInsertPos()->printAsOperand(O, false);
7477   O << ", ";
7478   getAddr()->printAsOperand(O, SlotTracker);
7479   VPValue *Mask = getMask();
7480   if (Mask) {
7481     O << ", ";
7482     Mask->printAsOperand(O, SlotTracker);
7483   }
7484   for (unsigned i = 0; i < IG->getFactor(); ++i)
7485     if (Instruction *I = IG->getMember(i))
7486       O << "\\l\" +\n" << Indent << "\"  " << VPlanIngredient(I) << " " << i;
7487 }
7488 
7489 void VPWidenCallRecipe::execute(VPTransformState &State) {
7490   State.ILV->widenCallInstruction(Ingredient, User, State);
7491 }
7492 
7493 void VPWidenSelectRecipe::execute(VPTransformState &State) {
7494   State.ILV->widenSelectInstruction(Ingredient, User, InvariantCond, State);
7495 }
7496 
7497 void VPWidenRecipe::execute(VPTransformState &State) {
7498   State.ILV->widenInstruction(Ingredient, User, State);
7499 }
7500 
7501 void VPWidenGEPRecipe::execute(VPTransformState &State) {
7502   State.ILV->widenGEP(GEP, User, State.UF, State.VF, IsPtrLoopInvariant,
7503                       IsIndexLoopInvariant, State);
7504 }
7505 
7506 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
7507   assert(!State.Instance && "Int or FP induction being replicated.");
7508   State.ILV->widenIntOrFpInduction(IV, Trunc);
7509 }
7510 
7511 void VPWidenPHIRecipe::execute(VPTransformState &State) {
7512   State.ILV->widenPHIInstruction(Phi, State.UF, State.VF);
7513 }
7514 
7515 void VPBlendRecipe::execute(VPTransformState &State) {
7516   State.ILV->setDebugLocFromInst(State.Builder, Phi);
7517   // We know that all PHIs in non-header blocks are converted into
7518   // selects, so we don't have to worry about the insertion order and we
7519   // can just use the builder.
7520   // At this point we generate the predication tree. There may be
7521   // duplications since this is a simple recursive scan, but future
7522   // optimizations will clean it up.
7523 
7524   unsigned NumIncoming = getNumIncomingValues();
7525 
7526   // Generate a sequence of selects of the form:
7527   // SELECT(Mask3, In3,
7528   //        SELECT(Mask2, In2,
7529   //               SELECT(Mask1, In1,
7530   //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // (and are essentially undef) are taken from In0.
7533   InnerLoopVectorizer::VectorParts Entry(State.UF);
7534   for (unsigned In = 0; In < NumIncoming; ++In) {
7535     for (unsigned Part = 0; Part < State.UF; ++Part) {
7536       // We might have single edge PHIs (blocks) - use an identity
7537       // 'select' for the first PHI operand.
7538       Value *In0 = State.get(getIncomingValue(In), Part);
7539       if (In == 0)
7540         Entry[Part] = In0; // Initialize with the first incoming value.
7541       else {
7542         // Select between the current value and the previous incoming edge
7543         // based on the incoming mask.
7544         Value *Cond = State.get(getMask(In), Part);
7545         Entry[Part] =
7546             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
7547       }
7548     }
7549   }
7550   for (unsigned Part = 0; Part < State.UF; ++Part)
7551     State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
7552 }
7553 
7554 void VPInterleaveRecipe::execute(VPTransformState &State) {
7555   assert(!State.Instance && "Interleave group being replicated.");
7556   State.ILV->vectorizeInterleaveGroup(IG, State, getAddr(), getMask());
7557 }
7558 
7559 void VPReplicateRecipe::execute(VPTransformState &State) {
7560   if (State.Instance) { // Generate a single instance.
7561     State.ILV->scalarizeInstruction(Ingredient, User, *State.Instance,
7562                                     IsPredicated, State);
7563     // Insert scalar instance packing it into a vector.
7564     if (AlsoPack && State.VF > 1) {
7565       // If we're constructing lane 0, initialize to start from undef.
7566       if (State.Instance->Lane == 0) {
7567         Value *Undef = UndefValue::get(
7568             FixedVectorType::get(Ingredient->getType(), State.VF));
7569         State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
7570       }
7571       State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
7572     }
7573     return;
7574   }
7575 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
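  // E.g., with VF = 4 and UF = 2 a non-uniform replicated instruction yields
  // 8 scalar copies, while a uniform one yields 2 (lane 0 of each part).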
7579   unsigned EndLane = IsUniform ? 1 : State.VF;
7580   for (unsigned Part = 0; Part < State.UF; ++Part)
7581     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
7582       State.ILV->scalarizeInstruction(Ingredient, User, {Part, Lane},
7583                                       IsPredicated, State);
7584 }
7585 
7586 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
7587   assert(State.Instance && "Branch on Mask works only on single instance.");
7588 
7589   unsigned Part = State.Instance->Part;
7590   unsigned Lane = State.Instance->Lane;
7591 
7592   Value *ConditionBit = nullptr;
7593   VPValue *BlockInMask = getMask();
7594   if (BlockInMask) {
7595     ConditionBit = State.get(BlockInMask, Part);
7596     if (ConditionBit->getType()->isVectorTy())
7597       ConditionBit = State.Builder.CreateExtractElement(
7598           ConditionBit, State.Builder.getInt32(Lane));
7599   } else // Block in mask is all-one.
7600     ConditionBit = State.Builder.getTrue();
7601 
7602   // Replace the temporary unreachable terminator with a new conditional branch,
7603   // whose two destinations will be set later when they are created.
7604   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
7605   assert(isa<UnreachableInst>(CurrentTerminator) &&
7606          "Expected to replace unreachable terminator with conditional branch.");
7607   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
7608   CondBr->setSuccessor(0, nullptr);
7609   ReplaceInstWithInst(CurrentTerminator, CondBr);
7610 }
7611 
7612 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
7613   assert(State.Instance && "Predicated instruction PHI works per instance.");
7614   Instruction *ScalarPredInst = cast<Instruction>(
7615       State.ValueMap.getScalarValue(PredInst, *State.Instance));
7616   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
7617   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
7618   assert(PredicatingBB && "Predicated block has no single predecessor.");
7619 
7620   // By current pack/unpack logic we need to generate only a single phi node: if
7621   // a vector value for the predicated instruction exists at this point it means
7622   // the instruction has vector users only, and a phi for the vector value is
7623   // needed. In this case the recipe of the predicated instruction is marked to
7624   // also do that packing, thereby "hoisting" the insert-element sequence.
7625   // Otherwise, a phi node for the scalar value is needed.
7626   unsigned Part = State.Instance->Part;
7627   if (State.ValueMap.hasVectorValue(PredInst, Part)) {
7628     Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
7629     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
7630     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
7631     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
7632     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
7633     State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
7634   } else {
7635     Type *PredInstType = PredInst->getType();
7636     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
7637     Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
7638     Phi->addIncoming(ScalarPredInst, PredicatedBB);
7639     State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
7640   }
7641 }
7642 
7643 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
7644   VPValue *StoredValue = isa<StoreInst>(Instr) ? getStoredValue() : nullptr;
7645   State.ILV->vectorizeMemoryInstruction(&Instr, State, getAddr(), StoredValue,
7646                                         getMask());
7647 }
7648 
7649 // Determine how to lower the scalar epilogue, which depends on 1) optimising
7650 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
7651 // predication, and 4) a TTI hook that analyses whether the loop is suitable
7652 // for predication.
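// For example, under -Os, or when PGSO deems this loop's header cold, the
// OptSize check below fires first: no scalar epilogue is allowed, regardless
// of any predication hints or options.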
7653 static ScalarEpilogueLowering getScalarEpilogueLowering(
7654     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
7655     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
7656     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
7657     LoopVectorizationLegality &LVL) {
7658   bool OptSize =
7659       F->hasOptSize() || llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
7660                                                      PGSOQueryType::IRPass);
7661   // 1) OptSize takes precedence over all other options, i.e. if this is set,
7662   // don't look at hints or options, and don't request a scalar epilogue.
7663   if (OptSize)
7664     return CM_ScalarEpilogueNotAllowedOptSize;
7665 
7666   bool PredicateOptDisabled = PreferPredicateOverEpilog.getNumOccurrences() &&
7667                               !PreferPredicateOverEpilog;
7668 
7669   // 2) Next, if disabling predication is requested on the command line, honour
7670   // this and request a scalar epilogue.
7671   if (PredicateOptDisabled)
7672     return CM_ScalarEpilogueAllowed;
7673 
  // 3) and 4) Look if enabling predication is requested on the command line
  // or with a loop hint, or if the TTI hook indicates this is profitable; if
  // so, request predication.
7677   if (PreferPredicateOverEpilog ||
7678       Hints.getPredicate() == LoopVectorizeHints::FK_Enabled ||
7679       (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
7680                                         LVL.getLAI()) &&
7681        Hints.getPredicate() != LoopVectorizeHints::FK_Disabled))
7682     return CM_ScalarEpilogueNotNeededUsePredicate;
7683 
7684   return CM_ScalarEpilogueAllowed;
7685 }
7686 
7687 // Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
7689 // VPlan-to-VPlan transformations from the very beginning without modifying the
7690 // input LLVM IR.
7691 static bool processLoopInVPlanNativePath(
7692     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
7693     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
7694     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
7695     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
7696     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {
7697 
7698   if (PSE.getBackedgeTakenCount() == PSE.getSE()->getCouldNotCompute()) {
7699     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
7700     return false;
7701   }
7702   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
7703   Function *F = L->getHeader()->getParent();
7704   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
7705 
7706   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
7707       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
7708 
7709   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
7710                                 &Hints, IAI);
7711   // Use the planner for outer loop vectorization.
7712   // TODO: CM is not used at this point inside the planner. Turn CM into an
7713   // optional argument if we don't need it in the future.
7714   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE);
7715 
7716   // Get user vectorization factor.
7717   const unsigned UserVF = Hints.getWidth();
7718 
7719   // Plan how to best vectorize, return the best VF and its cost.
7720   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
7721 
7722   // If we are stress testing VPlan builds, do not attempt to generate vector
7723   // code. Masked vector code generation support will follow soon.
7724   // Also, do not attempt to vectorize if no vector code will be produced.
7725   if (VPlanBuildStressTest || EnableVPlanPredication ||
7726       VectorizationFactor::Disabled() == VF)
7727     return false;
7728 
7729   LVP.setBestPlan(VF.Width, 1);
7730 
7731   InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
7732                          &CM);
7733   LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
7734                     << L->getHeader()->getParent()->getName() << "\"\n");
7735   LVP.executePlan(LB, DT);
7736 
7737   // Mark the loop as already vectorized to avoid vectorizing again.
7738   Hints.setAlreadyVectorized();
7739 
7740   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
7741   return true;
7742 }
7743 
7744 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
7745     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
7746                                !EnableLoopInterleaving),
7747       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
7748                               !EnableLoopVectorization) {}
7749 
7750 bool LoopVectorizePass::processLoop(Loop *L) {
7751   assert((EnableVPlanNativePath || L->empty()) &&
7752          "VPlan-native path is not enabled. Only process inner loops.");
7753 
7754 #ifndef NDEBUG
7755   const std::string DebugLocStr = getDebugLocString(L);
7756 #endif /* NDEBUG */
7757 
7758   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
7759                     << L->getHeader()->getParent()->getName() << "\" from "
7760                     << DebugLocStr << "\n");
7761 
7762   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
7763 
7764   LLVM_DEBUG(
7765       dbgs() << "LV: Loop hints:"
7766              << " force="
7767              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
7768                      ? "disabled"
7769                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
7770                             ? "enabled"
7771                             : "?"))
7772              << " width=" << Hints.getWidth()
7773              << " unroll=" << Hints.getInterleave() << "\n");
7774 
7775   // Function containing loop
7776   Function *F = L->getHeader()->getParent();
7777 
7778   // Looking at the diagnostic output is the only way to determine if a loop
7779   // was vectorized (other than looking at the IR or machine code), so it
7780   // is important to generate an optimization remark for each loop. Most of
7781   // these messages are generated as OptimizationRemarkAnalysis. Remarks
7782   // generated as OptimizationRemark and OptimizationRemarkMissed are
7783   // less verbose reporting vectorized loops and unvectorized loops that may
7784   // benefit from vectorization, respectively.
7785 
7786   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
7787     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
7788     return false;
7789   }
7790 
7791   PredicatedScalarEvolution PSE(*SE, *L);
7792 
7793   // Check if it is legal to vectorize the loop.
7794   LoopVectorizationRequirements Requirements(*ORE);
7795   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
7796                                 &Requirements, &Hints, DB, AC);
7797   if (!LVL.canVectorize(EnableVPlanNativePath)) {
7798     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
7799     Hints.emitRemarkWithHints();
7800     return false;
7801   }
7802 
7803   // Check the function attributes and profiles to find out if this function
7804   // should be optimized for size.
7805   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
7806       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
7807 
7808   // Entrance to the VPlan-native vectorization path. Outer loops are processed
7809   // here. They may require CFG and instruction level transformations before
7810   // even evaluating whether vectorization is profitable. Since we cannot modify
7811   // the incoming IR, we need to build VPlan upfront in the vectorization
7812   // pipeline.
7813   if (!L->empty())
7814     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
7815                                         ORE, BFI, PSI, Hints);
7816 
7817   assert(L->empty() && "Inner loop expected.");
7818 
7819   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
7820   // count by optimizing for size, to minimize overheads.
7821   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
7822   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
7823     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
7824                       << "This loop is worth vectorizing only if no scalar "
7825                       << "iteration overheads are incurred.");
7826     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
7827       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
7828     else {
7829       LLVM_DEBUG(dbgs() << "\n");
7830       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
7831     }
7832   }
7833 
7834   // Check the function attributes to see if implicit floats are allowed.
7835   // FIXME: This check doesn't seem possibly correct -- what if the loop is
7836   // an integer loop and the vector instructions selected are purely integer
7837   // vector instructions?
7838   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
7839     reportVectorizationFailure(
7840         "Can't vectorize when the NoImplicitFloat attribute is used",
7841         "loop not vectorized due to NoImplicitFloat attribute",
7842         "NoImplicitFloat", ORE, L);
7843     Hints.emitRemarkWithHints();
7844     return false;
7845   }
7846 
7847   // Check if the target supports potentially unsafe FP vectorization.
7848   // FIXME: Add a check for the type of safety issue (denormal, signaling)
7849   // for the target we're vectorizing for, to make sure none of the
7850   // additional fp-math flags can help.
7851   if (Hints.isPotentiallyUnsafe() &&
7852       TTI->isFPVectorizationPotentiallyUnsafe()) {
7853     reportVectorizationFailure(
7854         "Potentially unsafe FP op prevents vectorization",
7855         "loop not vectorized due to unsafe FP support.",
7856         "UnsafeFP", ORE, L);
7857     Hints.emitRemarkWithHints();
7858     return false;
7859   }
7860 
7861   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
7862   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
7863 
7864   // If an override option has been passed in for interleaved accesses, use it.
7865   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
7866     UseInterleaved = EnableInterleavedMemAccesses;
7867 
7868   // Analyze interleaved memory accesses.
7869   if (UseInterleaved) {
7870     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
7871   }
7872 
7873   // Use the cost model.
7874   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
7875                                 F, &Hints, IAI);
7876   CM.collectValuesToIgnore();
7877 
7878   // Use the planner for vectorization.
7879   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);
7880 
7881   // Get user vectorization factor and interleave count.
7882   unsigned UserVF = Hints.getWidth();
7883   unsigned UserIC = Hints.getInterleave();
7884 
7885   // Plan how to best vectorize, return the best VF and its cost.
7886   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
7887 
7888   VectorizationFactor VF = VectorizationFactor::Disabled();
7889   unsigned IC = 1;
7890 
7891   if (MaybeVF) {
7892     VF = *MaybeVF;
7893     // Select the interleave count.
7894     IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
7895   }
7896 
7897   // Identify the diagnostic messages that should be produced.
7898   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
7899   bool VectorizeLoop = true, InterleaveLoop = true;
7900   if (Requirements.doesNotMeet(F, L, Hints)) {
7901     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
7902                          "requirements.\n");
7903     Hints.emitRemarkWithHints();
7904     return false;
7905   }
7906 
7907   if (VF.Width == 1) {
7908     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
7909     VecDiagMsg = std::make_pair(
7910         "VectorizationNotBeneficial",
7911         "the cost-model indicates that vectorization is not beneficial");
7912     VectorizeLoop = false;
7913   }
7914 
7915   if (!MaybeVF && UserIC > 1) {
7916     // Tell the user interleaving was avoided up-front, despite being explicitly
7917     // requested.
7918     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
7919                          "interleaving should be avoided up front\n");
7920     IntDiagMsg = std::make_pair(
7921         "InterleavingAvoided",
7922         "Ignoring UserIC, because interleaving was avoided up front");
7923     InterleaveLoop = false;
7924   } else if (IC == 1 && UserIC <= 1) {
7925     // Tell the user interleaving is not beneficial.
7926     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
7927     IntDiagMsg = std::make_pair(
7928         "InterleavingNotBeneficial",
7929         "the cost-model indicates that interleaving is not beneficial");
7930     InterleaveLoop = false;
7931     if (UserIC == 1) {
7932       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
7933       IntDiagMsg.second +=
7934           " and is explicitly disabled or interleave count is set to 1";
7935     }
7936   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
7938     LLVM_DEBUG(
7939         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
7940     IntDiagMsg = std::make_pair(
7941         "InterleavingBeneficialButDisabled",
7942         "the cost-model indicates that interleaving is beneficial "
7943         "but is explicitly disabled or interleave count is set to 1");
7944     InterleaveLoop = false;
7945   }
7946 
7947   // Override IC if user provided an interleave count.
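  // (A user count typically comes from -force-vector-interleave or a
  // "#pragma clang loop interleave_count(N)" loop hint.)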
7948   IC = UserIC > 0 ? UserIC : IC;
7949 
7950   // Emit diagnostic messages, if any.
7951   const char *VAPassName = Hints.vectorizeAnalysisPassName();
7952   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
7954     ORE->emit([&]() {
7955       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
7956                                       L->getStartLoc(), L->getHeader())
7957              << VecDiagMsg.second;
7958     });
7959     ORE->emit([&]() {
7960       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
7961                                       L->getStartLoc(), L->getHeader())
7962              << IntDiagMsg.second;
7963     });
7964     return false;
7965   } else if (!VectorizeLoop && InterleaveLoop) {
7966     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
7967     ORE->emit([&]() {
7968       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
7969                                         L->getStartLoc(), L->getHeader())
7970              << VecDiagMsg.second;
7971     });
7972   } else if (VectorizeLoop && !InterleaveLoop) {
7973     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
7974                       << ") in " << DebugLocStr << '\n');
7975     ORE->emit([&]() {
7976       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
7977                                         L->getStartLoc(), L->getHeader())
7978              << IntDiagMsg.second;
7979     });
7980   } else if (VectorizeLoop && InterleaveLoop) {
7981     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
7982                       << ") in " << DebugLocStr << '\n');
7983     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
7984   }
7985 
7986   LVP.setBestPlan(VF.Width, IC);
7987 
7988   using namespace ore;
7989   bool DisableRuntimeUnroll = false;
7990   MDNode *OrigLoopID = L->getLoopID();
7991 
7992   if (!VectorizeLoop) {
7993     assert(IC > 1 && "interleave count should not be 1 or 0");
7994     // If we decided that it is not legal to vectorize the loop, then
7995     // interleave it.
7996     InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
7997                                &CM);
7998     LVP.executePlan(Unroller, DT);
7999 
8000     ORE->emit([&]() {
8001       return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
8002                                 L->getHeader())
8003              << "interleaved loop (interleaved count: "
8004              << NV("InterleaveCount", IC) << ")";
8005     });
8006   } else {
8007     // If we decided that it is *legal* to vectorize the loop, then do it.
8008     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
8009                            &LVL, &CM);
8010     LVP.executePlan(LB, DT);
8011     ++LoopsVectorized;
8012 
8013     // Add metadata to disable runtime unrolling a scalar loop when there are
8014     // no runtime checks about strides and memory. A scalar loop that is
8015     // rarely used is not worth unrolling.
8016     if (!LB.areSafetyChecksAdded())
8017       DisableRuntimeUnroll = true;
8018 
8019     // Report the vectorization decision.
8020     ORE->emit([&]() {
8021       return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
8022                                 L->getHeader())
8023              << "vectorized loop (vectorization width: "
8024              << NV("VectorizationFactor", VF.Width)
8025              << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
8026     });
8027   }
8028 
8029   Optional<MDNode *> RemainderLoopID =
8030       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
8031                                       LLVMLoopVectorizeFollowupEpilogue});
8032   if (RemainderLoopID.hasValue()) {
8033     L->setLoopID(RemainderLoopID.getValue());
8034   } else {
8035     if (DisableRuntimeUnroll)
8036       AddRuntimeUnrollDisableMetaData(L);
8037 
8038     // Mark the loop as already vectorized to avoid vectorizing again.
8039     Hints.setAlreadyVectorized();
8040   }
8041 
8042   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
8043   return true;
8044 }
8045 
8046 LoopVectorizeResult LoopVectorizePass::runImpl(
8047     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
8048     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
8049     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
8050     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
8051     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
8052   SE = &SE_;
8053   LI = &LI_;
8054   TTI = &TTI_;
8055   DT = &DT_;
8056   BFI = &BFI_;
8057   TLI = TLI_;
8058   AA = &AA_;
8059   AC = &AC_;
8060   GetLAA = &GetLAA_;
8061   DB = &DB_;
8062   ORE = &ORE_;
8063   PSI = PSI_;
8064 
8065   // Don't attempt if
8066   // 1. the target claims to have no vector registers, and
8067   // 2. interleaving won't help ILP.
8068   //
8069   // The second condition is necessary because, even if the target has no
8070   // vector registers, loop vectorization may still enable scalar
8071   // interleaving.
8072   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
8073       TTI->getMaxInterleaveFactor(1) < 2)
8074     return LoopVectorizeResult(false, false);
8075 
8076   bool Changed = false, CFGChanged = false;
8077 
8078   // The vectorizer requires loops to be in simplified form.
8079   // Since simplification may add new inner loops, it has to run before the
8080   // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
8082   // vectorized.
8083   for (auto &L : *LI)
8084     Changed |= CFGChanged |=
8085         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
8086 
8087   // Build up a worklist of inner-loops to vectorize. This is necessary as
8088   // the act of vectorizing or partially unrolling a loop creates new loops
8089   // and can invalidate iterators across the loops.
8090   SmallVector<Loop *, 8> Worklist;
8091 
8092   for (Loop *L : *LI)
8093     collectSupportedLoops(*L, LI, ORE, Worklist);
8094 
8095   LoopsAnalyzed += Worklist.size();
8096 
8097   // Now walk the identified inner loops.
8098   while (!Worklist.empty()) {
8099     Loop *L = Worklist.pop_back_val();
8100 
8101     // For the inner loops we actually process, form LCSSA to simplify the
8102     // transform.
8103     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
8104 
8105     Changed |= CFGChanged |= processLoop(L);
8106   }
8107 
8108   // Process each loop nest in the function.
8109   return LoopVectorizeResult(Changed, CFGChanged);
8110 }
8111 
8112 PreservedAnalyses LoopVectorizePass::run(Function &F,
8113                                          FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
8156 }
8157