1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
17 //
18 // This pass has three parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
28 // There is a development effort going on to migrate loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SetVector.h"
73 #include "llvm/ADT/SmallPtrSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/MemorySSA.h"
91 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
92 #include "llvm/Analysis/ProfileSummaryInfo.h"
93 #include "llvm/Analysis/ScalarEvolution.h"
94 #include "llvm/Analysis/ScalarEvolutionExpander.h"
95 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
96 #include "llvm/Analysis/TargetLibraryInfo.h"
97 #include "llvm/Analysis/TargetTransformInfo.h"
98 #include "llvm/Analysis/VectorUtils.h"
99 #include "llvm/IR/Attributes.h"
100 #include "llvm/IR/BasicBlock.h"
101 #include "llvm/IR/CFG.h"
102 #include "llvm/IR/Constant.h"
103 #include "llvm/IR/Constants.h"
104 #include "llvm/IR/DataLayout.h"
105 #include "llvm/IR/DebugInfoMetadata.h"
106 #include "llvm/IR/DebugLoc.h"
107 #include "llvm/IR/DerivedTypes.h"
108 #include "llvm/IR/DiagnosticInfo.h"
109 #include "llvm/IR/Dominators.h"
110 #include "llvm/IR/Function.h"
111 #include "llvm/IR/IRBuilder.h"
112 #include "llvm/IR/InstrTypes.h"
113 #include "llvm/IR/Instruction.h"
114 #include "llvm/IR/Instructions.h"
115 #include "llvm/IR/IntrinsicInst.h"
116 #include "llvm/IR/Intrinsics.h"
117 #include "llvm/IR/LLVMContext.h"
118 #include "llvm/IR/Metadata.h"
119 #include "llvm/IR/Module.h"
120 #include "llvm/IR/Operator.h"
121 #include "llvm/IR/Type.h"
122 #include "llvm/IR/Use.h"
123 #include "llvm/IR/User.h"
124 #include "llvm/IR/Value.h"
125 #include "llvm/IR/ValueHandle.h"
126 #include "llvm/IR/Verifier.h"
127 #include "llvm/InitializePasses.h"
128 #include "llvm/Pass.h"
129 #include "llvm/Support/Casting.h"
130 #include "llvm/Support/CommandLine.h"
131 #include "llvm/Support/Compiler.h"
132 #include "llvm/Support/Debug.h"
133 #include "llvm/Support/ErrorHandling.h"
134 #include "llvm/Support/MathExtras.h"
135 #include "llvm/Support/raw_ostream.h"
136 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
137 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
138 #include "llvm/Transforms/Utils/LoopSimplify.h"
139 #include "llvm/Transforms/Utils/LoopUtils.h"
140 #include "llvm/Transforms/Utils/LoopVersioning.h"
141 #include "llvm/Transforms/Utils/SizeOpts.h"
142 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
143 #include <algorithm>
144 #include <cassert>
145 #include <cstdint>
146 #include <cstdlib>
147 #include <functional>
148 #include <iterator>
149 #include <limits>
150 #include <memory>
151 #include <string>
152 #include <tuple>
153 #include <utility>
154 
155 using namespace llvm;
156 
157 #define LV_NAME "loop-vectorize"
158 #define DEBUG_TYPE LV_NAME
159 
160 /// @{
161 /// Metadata attribute names
162 static const char *const LLVMLoopVectorizeFollowupAll =
163     "llvm.loop.vectorize.followup_all";
164 static const char *const LLVMLoopVectorizeFollowupVectorized =
165     "llvm.loop.vectorize.followup_vectorized";
166 static const char *const LLVMLoopVectorizeFollowupEpilogue =
167     "llvm.loop.vectorize.followup_epilogue";
168 /// @}
169 
170 STATISTIC(LoopsVectorized, "Number of loops vectorized");
171 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
172 
173 /// Loops with a known constant trip count below this number are vectorized only
174 /// if no scalar iteration overheads are incurred.
175 static cl::opt<unsigned> TinyTripCountVectorThreshold(
176     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
177     cl::desc("Loops with a constant trip count that is smaller than this "
178              "value are vectorized only if no scalar iteration overheads "
179              "are incurred."));
180 
181 // Indicates that an epilogue is undesired, predication is preferred.
182 // This means that the vectorizer will try to fold the loop-tail (epilogue)
183 // into the loop and predicate the loop body accordingly.
184 static cl::opt<bool> PreferPredicateOverEpilog(
185     "prefer-predicate-over-epilog", cl::init(false), cl::Hidden,
186     cl::desc("Indicate that an epilogue is undesired, predication should be "
187              "used instead."));
188 
189 static cl::opt<bool> MaximizeBandwidth(
190     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
191     cl::desc("Maximize bandwidth when selecting vectorization factor which "
192              "will be determined by the smallest type in loop."));
193 
194 static cl::opt<bool> EnableInterleavedMemAccesses(
195     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
196     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
197 
198 /// An interleave-group may need masking if it resides in a block that needs
199 /// predication, or in order to mask away gaps.
200 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
201     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
202     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
203 
204 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
205     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
206     cl::desc("We don't interleave loops with a estimated constant trip count "
207              "below this number"));
208 
209 static cl::opt<unsigned> ForceTargetNumScalarRegs(
210     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
211     cl::desc("A flag that overrides the target's number of scalar registers."));
212 
213 static cl::opt<unsigned> ForceTargetNumVectorRegs(
214     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
215     cl::desc("A flag that overrides the target's number of vector registers."));
216 
217 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
218     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
219     cl::desc("A flag that overrides the target's max interleave factor for "
220              "scalar loops."));
221 
222 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
223     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
224     cl::desc("A flag that overrides the target's max interleave factor for "
225              "vectorized loops."));
226 
227 static cl::opt<unsigned> ForceTargetInstructionCost(
228     "force-target-instruction-cost", cl::init(0), cl::Hidden,
229     cl::desc("A flag that overrides the target's expected cost for "
230              "an instruction to a single constant value. Mostly "
231              "useful for getting consistent testing."));
232 
233 static cl::opt<unsigned> SmallLoopCost(
234     "small-loop-cost", cl::init(20), cl::Hidden,
235     cl::desc(
236         "The cost of a loop that is considered 'small' by the interleaver."));
237 
238 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
239     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
240     cl::desc("Enable the use of the block frequency analysis to access PGO "
241              "heuristics minimizing code growth in cold regions and being more "
242              "aggressive in hot regions."));
243 
244 // Runtime interleave loops for load/store throughput.
245 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
246     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
247     cl::desc(
248         "Enable runtime interleaving until load/store ports are saturated"));
249 
250 /// The number of stores in a loop that are allowed to need predication.
251 static cl::opt<unsigned> NumberOfStoresToPredicate(
252     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
253     cl::desc("Max number of stores to be predicated behind an if."));
254 
255 static cl::opt<bool> EnableIndVarRegisterHeur(
256     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
257     cl::desc("Count the induction variable only once when interleaving"));
258 
259 static cl::opt<bool> EnableCondStoresVectorization(
260     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
261     cl::desc("Enable if predication of stores during vectorization."));
262 
263 static cl::opt<unsigned> MaxNestedScalarReductionIC(
264     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
265     cl::desc("The maximum interleave count to use when interleaving a scalar "
266              "reduction in a nested loop."));
267 
268 cl::opt<bool> EnableVPlanNativePath(
269     "enable-vplan-native-path", cl::init(false), cl::Hidden,
270     cl::desc("Enable VPlan-native vectorization path with "
271              "support for outer loop vectorization."));
272 
273 // FIXME: Remove this switch once we have divergence analysis. Currently we
274 // assume divergent non-backedge branches when this switch is true.
275 cl::opt<bool> EnableVPlanPredication(
276     "enable-vplan-predication", cl::init(false), cl::Hidden,
277     cl::desc("Enable VPlan-native vectorization path predicator with "
278              "support for outer loop vectorization."));
279 
280 // This flag enables the stress testing of the VPlan H-CFG construction in the
281 // VPlan-native vectorization path. It must be used in conjuction with
282 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
283 // verification of the H-CFGs built.
284 static cl::opt<bool> VPlanBuildStressTest(
285     "vplan-build-stress-test", cl::init(false), cl::Hidden,
286     cl::desc(
287         "Build VPlan for every supported loop nest in the function and bail "
288         "out right after the build (stress test the VPlan H-CFG construction "
289         "in the VPlan-native vectorization path)."));
290 
291 cl::opt<bool> llvm::EnableLoopInterleaving(
292     "interleave-loops", cl::init(true), cl::Hidden,
293     cl::desc("Enable loop interleaving in Loop vectorization passes"));
294 cl::opt<bool> llvm::EnableLoopVectorization(
295     "vectorize-loops", cl::init(true), cl::Hidden,
296     cl::desc("Run the Loop vectorization passes"));
297 
298 /// A helper function that returns the type of loaded or stored value.
299 static Type *getMemInstValueType(Value *I) {
300   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
301          "Expected Load or Store instruction");
302   if (auto *LI = dyn_cast<LoadInst>(I))
303     return LI->getType();
304   return cast<StoreInst>(I)->getValueOperand()->getType();
305 }
306 
307 /// A helper function that returns true if the given type is irregular. The
308 /// type is irregular if its allocated size doesn't equal the store size of an
309 /// element of the corresponding vector type at the given vectorization factor.
310 static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
311   // Determine if an array of VF elements of type Ty is "bitcast compatible"
312   // with a <VF x Ty> vector.
313   if (VF > 1) {
314     auto *VectorTy = VectorType::get(Ty, VF);
315     return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
316   }
317 
318   // If the vectorization factor is one, we just check if an array of type Ty
319   // requires padding between elements.
320   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
321 }
322 
323 /// A helper function that returns the reciprocal of the block probability of
324 /// predicated blocks. If we return X, we are assuming the predicated block
325 /// will execute once for every X iterations of the loop header.
326 ///
327 /// TODO: We should use actual block probability here, if available. Currently,
328 ///       we always assume predicated blocks have a 50% chance of executing.
329 static unsigned getReciprocalPredBlockProb() { return 2; }
330 
331 /// A helper function that adds a 'fast' flag to floating-point operations.
332 static Value *addFastMathFlag(Value *V) {
333   if (isa<FPMathOperator>(V))
334     cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
335   return V;
336 }
337 
338 static Value *addFastMathFlag(Value *V, FastMathFlags FMF) {
339   if (isa<FPMathOperator>(V))
340     cast<Instruction>(V)->setFastMathFlags(FMF);
341   return V;
342 }
343 
344 /// A helper function that returns an integer or floating-point constant with
345 /// value C.
346 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
347   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
348                            : ConstantFP::get(Ty, C);
349 }
350 
351 /// Returns "best known" trip count for the specified loop \p L as defined by
352 /// the following procedure:
353 ///   1) Returns exact trip count if it is known.
354 ///   2) Returns expected trip count according to profile data if any.
355 ///   3) Returns upper bound estimate if it is known.
356 ///   4) Returns None if all of the above failed.
357 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
358   // Check if exact trip count is known.
359   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
360     return ExpectedTC;
361 
362   // Check if there is an expected trip count available from profile data.
363   if (LoopVectorizeWithBlockFrequency)
364     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
365       return EstimatedTC;
366 
367   // Check if upper bound estimate is known.
368   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
369     return ExpectedTC;
370 
371   return None;
372 }
373 
374 namespace llvm {
375 
376 /// InnerLoopVectorizer vectorizes loops which contain only one basic
377 /// block to a specified vectorization factor (VF).
378 /// This class performs the widening of scalars into vectors, or multiple
379 /// scalars. This class also implements the following features:
380 /// * It inserts an epilogue loop for handling loops that don't have iteration
381 ///   counts that are known to be a multiple of the vectorization factor.
382 /// * It handles the code generation for reduction variables.
383 /// * Scalarization (implementation using scalars) of un-vectorizable
384 ///   instructions.
385 /// InnerLoopVectorizer does not perform any vectorization-legality
386 /// checks, and relies on the caller to check for the different legality
387 /// aspects. The InnerLoopVectorizer relies on the
388 /// LoopVectorizationLegality class to provide information about the induction
389 /// and reduction variables that were found to a given vectorization factor.
390 class InnerLoopVectorizer {
391 public:
392   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
393                       LoopInfo *LI, DominatorTree *DT,
394                       const TargetLibraryInfo *TLI,
395                       const TargetTransformInfo *TTI, AssumptionCache *AC,
396                       OptimizationRemarkEmitter *ORE, unsigned VecWidth,
397                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
398                       LoopVectorizationCostModel *CM)
399       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
400         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
401         Builder(PSE.getSE()->getContext()),
402         VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}
403   virtual ~InnerLoopVectorizer() = default;
404 
405   /// Create a new empty loop. Unlink the old loop and connect the new one.
406   /// Return the pre-header block of the new loop.
407   BasicBlock *createVectorizedLoopSkeleton();
408 
409   /// Widen a single instruction within the innermost loop.
410   void widenInstruction(Instruction &I, VPUser &Operands,
411                         VPTransformState &State);
412 
413   /// Widen a single call instruction within the innermost loop.
414   void widenCallInstruction(CallInst &I, VPUser &ArgOperands,
415                             VPTransformState &State);
416 
417   /// Widen a single select instruction within the innermost loop.
418   void widenSelectInstruction(SelectInst &I, bool InvariantCond);
419 
420   /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
421   void fixVectorizedLoop();
422 
423   // Return true if any runtime check is added.
424   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
425 
426   /// A type for vectorized values in the new loop. Each value from the
427   /// original loop, when vectorized, is represented by UF vector values in the
428   /// new unrolled loop, where UF is the unroll factor.
429   using VectorParts = SmallVector<Value *, 2>;
430 
431   /// Vectorize a single GetElementPtrInst based on information gathered and
432   /// decisions taken during planning.
433   void widenGEP(GetElementPtrInst *GEP, unsigned UF, unsigned VF,
434                 bool IsPtrLoopInvariant, SmallBitVector &IsIndexLoopInvariant);
435 
436   /// Vectorize a single PHINode in a block. This method handles the induction
437   /// variable canonicalization. It supports both VF = 1 for unrolled loops and
438   /// arbitrary length vectors.
439   void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);
440 
441   /// A helper function to scalarize a single Instruction in the innermost loop.
442   /// Generates a sequence of scalar instances for each lane between \p MinLane
443   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
444   /// inclusive..
445   void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
446                             bool IfPredicateInstr);
447 
448   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
449   /// is provided, the integer induction variable will first be truncated to
450   /// the corresponding type.
451   void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);
452 
453   /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
454   /// vector or scalar value on-demand if one is not yet available. When
455   /// vectorizing a loop, we visit the definition of an instruction before its
456   /// uses. When visiting the definition, we either vectorize or scalarize the
457   /// instruction, creating an entry for it in the corresponding map. (In some
458   /// cases, such as induction variables, we will create both vector and scalar
459   /// entries.) Then, as we encounter uses of the definition, we derive values
460   /// for each scalar or vector use unless such a value is already available.
461   /// For example, if we scalarize a definition and one of its uses is vector,
462   /// we build the required vector on-demand with an insertelement sequence
463   /// when visiting the use. Otherwise, if the use is scalar, we can use the
464   /// existing scalar definition.
465   ///
466   /// Return a value in the new loop corresponding to \p V from the original
467   /// loop at unroll index \p Part. If the value has already been vectorized,
468   /// the corresponding vector entry in VectorLoopValueMap is returned. If,
469   /// however, the value has a scalar entry in VectorLoopValueMap, we construct
470   /// a new vector value on-demand by inserting the scalar values into a vector
471   /// with an insertelement sequence. If the value has been neither vectorized
472   /// nor scalarized, it must be loop invariant, so we simply broadcast the
473   /// value into a vector.
474   Value *getOrCreateVectorValue(Value *V, unsigned Part);
475 
476   /// Return a value in the new loop corresponding to \p V from the original
477   /// loop at unroll and vector indices \p Instance. If the value has been
478   /// vectorized but not scalarized, the necessary extractelement instruction
479   /// will be generated.
480   Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);
481 
482   /// Construct the vector value of a scalarized value \p V one lane at a time.
483   void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);
484 
485   /// Try to vectorize interleaved access group \p Group with the base address
486   /// given in \p Addr, optionally masking the vector operations if \p
487   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
488   /// values in the vectorized loop.
489   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
490                                 VPTransformState &State, VPValue *Addr,
491                                 VPValue *BlockInMask = nullptr);
492 
493   /// Vectorize Load and Store instructions with the base address given in \p
494   /// Addr, optionally masking the vector operations if \p BlockInMask is
495   /// non-null. Use \p State to translate given VPValues to IR values in the
496   /// vectorized loop.
497   void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
498                                   VPValue *Addr, VPValue *StoredValue,
499                                   VPValue *BlockInMask);
500 
501   /// Set the debug location in the builder using the debug location in
502   /// the instruction.
503   void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
504 
505   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
506   void fixNonInductionPHIs(void);
507 
508 protected:
509   friend class LoopVectorizationPlanner;
510 
511   /// A small list of PHINodes.
512   using PhiVector = SmallVector<PHINode *, 4>;
513 
514   /// A type for scalarized values in the new loop. Each value from the
515   /// original loop, when scalarized, is represented by UF x VF scalar values
516   /// in the new unrolled loop, where UF is the unroll factor and VF is the
517   /// vectorization factor.
518   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
519 
520   /// Set up the values of the IVs correctly when exiting the vector loop.
521   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
522                     Value *CountRoundDown, Value *EndValue,
523                     BasicBlock *MiddleBlock);
524 
525   /// Create a new induction variable inside L.
526   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
527                                    Value *Step, Instruction *DL);
528 
529   /// Handle all cross-iteration phis in the header.
530   void fixCrossIterationPHIs();
531 
532   /// Fix a first-order recurrence. This is the second phase of vectorizing
533   /// this phi node.
534   void fixFirstOrderRecurrence(PHINode *Phi);
535 
536   /// Fix a reduction cross-iteration phi. This is the second phase of
537   /// vectorizing this phi node.
538   void fixReduction(PHINode *Phi);
539 
540   /// Clear NSW/NUW flags from reduction instructions if necessary.
541   void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);
542 
543   /// The Loop exit block may have single value PHI nodes with some
544   /// incoming value. While vectorizing we only handled real values
545   /// that were defined inside the loop and we should have one value for
546   /// each predecessor of its parent basic block. See PR14725.
547   void fixLCSSAPHIs();
548 
549   /// Iteratively sink the scalarized operands of a predicated instruction into
550   /// the block that was created for it.
551   void sinkScalarOperands(Instruction *PredInst);
552 
553   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
554   /// represented as.
555   void truncateToMinimalBitwidths();
556 
557   /// Create a broadcast instruction. This method generates a broadcast
558   /// instruction (shuffle) for loop invariant values and for the induction
559   /// value. If this is the induction variable then we extend it to N, N+1, ...
560   /// this is needed because each iteration in the loop corresponds to a SIMD
561   /// element.
562   virtual Value *getBroadcastInstrs(Value *V);
563 
564   /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
565   /// to each vector element of Val. The sequence starts at StartIndex.
566   /// \p Opcode is relevant for FP induction variable.
567   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
568                                Instruction::BinaryOps Opcode =
569                                Instruction::BinaryOpsEnd);
570 
571   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
572   /// variable on which to base the steps, \p Step is the size of the step, and
573   /// \p EntryVal is the value from the original loop that maps to the steps.
574   /// Note that \p EntryVal doesn't have to be an induction variable - it
575   /// can also be a truncate instruction.
576   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
577                         const InductionDescriptor &ID);
578 
579   /// Create a vector induction phi node based on an existing scalar one. \p
580   /// EntryVal is the value from the original loop that maps to the vector phi
581   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
582   /// truncate instruction, instead of widening the original IV, we widen a
583   /// version of the IV truncated to \p EntryVal's type.
584   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
585                                        Value *Step, Instruction *EntryVal);
586 
587   /// Returns true if an instruction \p I should be scalarized instead of
588   /// vectorized for the chosen vectorization factor.
589   bool shouldScalarizeInstruction(Instruction *I) const;
590 
591   /// Returns true if we should generate a scalar version of \p IV.
592   bool needsScalarInduction(Instruction *IV) const;
593 
594   /// If there is a cast involved in the induction variable \p ID, which should
595   /// be ignored in the vectorized loop body, this function records the
596   /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
597   /// cast. We had already proved that the casted Phi is equal to the uncasted
598   /// Phi in the vectorized loop (under a runtime guard), and therefore
599   /// there is no need to vectorize the cast - the same value can be used in the
600   /// vector loop for both the Phi and the cast.
601   /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified,
602   /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
603   ///
604   /// \p EntryVal is the value from the original loop that maps to the vector
605   /// phi node and is used to distinguish what is the IV currently being
606   /// processed - original one (if \p EntryVal is a phi corresponding to the
607   /// original IV) or the "newly-created" one based on the proof mentioned above
608   /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the
609   /// latter case \p EntryVal is a TruncInst and we must not record anything for
610   /// that IV, but it's error-prone to expect callers of this routine to care
611   /// about that, hence this explicit parameter.
612   void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
613                                              const Instruction *EntryVal,
614                                              Value *VectorLoopValue,
615                                              unsigned Part,
616                                              unsigned Lane = UINT_MAX);
617 
618   /// Generate a shuffle sequence that will reverse the vector Vec.
619   virtual Value *reverseVector(Value *Vec);
620 
621   /// Returns (and creates if needed) the original loop trip count.
622   Value *getOrCreateTripCount(Loop *NewLoop);
623 
624   /// Returns (and creates if needed) the trip count of the widened loop.
625   Value *getOrCreateVectorTripCount(Loop *NewLoop);
626 
627   /// Returns a bitcasted value to the requested vector type.
628   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
629   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
630                                 const DataLayout &DL);
631 
632   /// Emit a bypass check to see if the vector trip count is zero, including if
633   /// it overflows.
634   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
635 
636   /// Emit a bypass check to see if all of the SCEV assumptions we've
637   /// had to make are correct.
638   void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
639 
640   /// Emit bypass checks to check any memory assumptions we may have made.
641   void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
642 
643   /// Compute the transformed value of Index at offset StartValue using step
644   /// StepValue.
645   /// For integer induction, returns StartValue + Index * StepValue.
646   /// For pointer induction, returns StartValue[Index * StepValue].
647   /// FIXME: The newly created binary instructions should contain nsw/nuw
648   /// flags, which can be found from the original scalar operations.
649   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
650                               const DataLayout &DL,
651                               const InductionDescriptor &ID) const;
652 
653   /// Add additional metadata to \p To that was not present on \p Orig.
654   ///
655   /// Currently this is used to add the noalias annotations based on the
656   /// inserted memchecks.  Use this for instructions that are *cloned* into the
657   /// vector loop.
658   void addNewMetadata(Instruction *To, const Instruction *Orig);
659 
660   /// Add metadata from one instruction to another.
661   ///
662   /// This includes both the original MDs from \p From and additional ones (\see
663   /// addNewMetadata).  Use this for *newly created* instructions in the vector
664   /// loop.
665   void addMetadata(Instruction *To, Instruction *From);
666 
667   /// Similar to the previous function but it adds the metadata to a
668   /// vector of instructions.
669   void addMetadata(ArrayRef<Value *> To, Instruction *From);
670 
671   /// The original loop.
672   Loop *OrigLoop;
673 
674   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
675   /// dynamic knowledge to simplify SCEV expressions and converts them to a
676   /// more usable form.
677   PredicatedScalarEvolution &PSE;
678 
679   /// Loop Info.
680   LoopInfo *LI;
681 
682   /// Dominator Tree.
683   DominatorTree *DT;
684 
685   /// Alias Analysis.
686   AliasAnalysis *AA;
687 
688   /// Target Library Info.
689   const TargetLibraryInfo *TLI;
690 
691   /// Target Transform Info.
692   const TargetTransformInfo *TTI;
693 
694   /// Assumption Cache.
695   AssumptionCache *AC;
696 
697   /// Interface to emit optimization remarks.
698   OptimizationRemarkEmitter *ORE;
699 
700   /// LoopVersioning.  It's only set up (non-null) if memchecks were
701   /// used.
702   ///
703   /// This is currently only used to add no-alias metadata based on the
704   /// memchecks.  The actually versioning is performed manually.
705   std::unique_ptr<LoopVersioning> LVer;
706 
707   /// The vectorization SIMD factor to use. Each vector will have this many
708   /// vector elements.
709   unsigned VF;
710 
711   /// The vectorization unroll factor to use. Each scalar is vectorized to this
712   /// many different vector instructions.
713   unsigned UF;
714 
715   /// The builder that we use
716   IRBuilder<> Builder;
717 
718   // --- Vectorization state ---
719 
720   /// The vector-loop preheader.
721   BasicBlock *LoopVectorPreHeader;
722 
723   /// The scalar-loop preheader.
724   BasicBlock *LoopScalarPreHeader;
725 
726   /// Middle Block between the vector and the scalar.
727   BasicBlock *LoopMiddleBlock;
728 
729   /// The ExitBlock of the scalar loop.
730   BasicBlock *LoopExitBlock;
731 
732   /// The vector loop body.
733   BasicBlock *LoopVectorBody;
734 
735   /// The scalar loop body.
736   BasicBlock *LoopScalarBody;
737 
738   /// A list of all bypass blocks. The first block is the entry of the loop.
739   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
740 
741   /// The new Induction variable which was added to the new block.
742   PHINode *Induction = nullptr;
743 
744   /// The induction variable of the old basic block.
745   PHINode *OldInduction = nullptr;
746 
747   /// Maps values from the original loop to their corresponding values in the
748   /// vectorized loop. A key value can map to either vector values, scalar
749   /// values or both kinds of values, depending on whether the key was
750   /// vectorized and scalarized.
751   VectorizerValueMap VectorLoopValueMap;
752 
753   /// Store instructions that were predicated.
754   SmallVector<Instruction *, 4> PredicatedInstructions;
755 
756   /// Trip count of the original loop.
757   Value *TripCount = nullptr;
758 
759   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
760   Value *VectorTripCount = nullptr;
761 
762   /// The legality analysis.
763   LoopVectorizationLegality *Legal;
764 
765   /// The profitablity analysis.
766   LoopVectorizationCostModel *Cost;
767 
768   // Record whether runtime checks are added.
769   bool AddedSafetyChecks = false;
770 
771   // Holds the end values for each induction variable. We save the end values
772   // so we can later fix-up the external users of the induction variables.
773   DenseMap<PHINode *, Value *> IVEndValues;
774 
775   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
776   // fixed up at the end of vector code generation.
777   SmallVector<PHINode *, 8> OrigPHIsToFix;
778 };
779 
780 class InnerLoopUnroller : public InnerLoopVectorizer {
781 public:
782   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
783                     LoopInfo *LI, DominatorTree *DT,
784                     const TargetLibraryInfo *TLI,
785                     const TargetTransformInfo *TTI, AssumptionCache *AC,
786                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
787                     LoopVectorizationLegality *LVL,
788                     LoopVectorizationCostModel *CM)
789       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
790                             UnrollFactor, LVL, CM) {}
791 
792 private:
793   Value *getBroadcastInstrs(Value *V) override;
794   Value *getStepVector(Value *Val, int StartIdx, Value *Step,
795                        Instruction::BinaryOps Opcode =
796                        Instruction::BinaryOpsEnd) override;
797   Value *reverseVector(Value *Vec) override;
798 };
799 
800 } // end namespace llvm
801 
802 /// Look for a meaningful debug location on the instruction or it's
803 /// operands.
804 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
805   if (!I)
806     return I;
807 
808   DebugLoc Empty;
809   if (I->getDebugLoc() != Empty)
810     return I;
811 
812   for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
813     if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
814       if (OpInst->getDebugLoc() != Empty)
815         return OpInst;
816   }
817 
818   return I;
819 }
820 
821 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
822   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
823     const DILocation *DIL = Inst->getDebugLoc();
824     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
825         !isa<DbgInfoIntrinsic>(Inst)) {
826       auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(UF * VF);
827       if (NewDIL)
828         B.SetCurrentDebugLocation(NewDIL.getValue());
829       else
830         LLVM_DEBUG(dbgs()
831                    << "Failed to create new discriminator: "
832                    << DIL->getFilename() << " Line: " << DIL->getLine());
833     }
834     else
835       B.SetCurrentDebugLocation(DIL);
836   } else
837     B.SetCurrentDebugLocation(DebugLoc());
838 }
839 
840 /// Write a record \p DebugMsg about vectorization failure to the debug
841 /// output stream. If \p I is passed, it is an instruction that prevents
842 /// vectorization.
843 #ifndef NDEBUG
844 static void debugVectorizationFailure(const StringRef DebugMsg,
845     Instruction *I) {
846   dbgs() << "LV: Not vectorizing: " << DebugMsg;
847   if (I != nullptr)
848     dbgs() << " " << *I;
849   else
850     dbgs() << '.';
851   dbgs() << '\n';
852 }
853 #endif
854 
855 /// Create an analysis remark that explains why vectorization failed
856 ///
857 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
858 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
859 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
860 /// the location of the remark.  \return the remark object that can be
861 /// streamed to.
862 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
863     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
864   Value *CodeRegion = TheLoop->getHeader();
865   DebugLoc DL = TheLoop->getStartLoc();
866 
867   if (I) {
868     CodeRegion = I->getParent();
869     // If there is no debug location attached to the instruction, revert back to
870     // using the loop's.
871     if (I->getDebugLoc())
872       DL = I->getDebugLoc();
873   }
874 
875   OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
876   R << "loop not vectorized: ";
877   return R;
878 }
879 
880 namespace llvm {
881 
882 void reportVectorizationFailure(const StringRef DebugMsg,
883     const StringRef OREMsg, const StringRef ORETag,
884     OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) {
885   LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
886   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
887   ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
888                 ORETag, TheLoop, I) << OREMsg);
889 }
890 
891 } // end namespace llvm
892 
893 #ifndef NDEBUG
894 /// \return string containing a file name and a line # for the given loop.
895 static std::string getDebugLocString(const Loop *L) {
896   std::string Result;
897   if (L) {
898     raw_string_ostream OS(Result);
899     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
900       LoopDbgLoc.print(OS);
901     else
902       // Just print the module name.
903       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
904     OS.flush();
905   }
906   return Result;
907 }
908 #endif
909 
910 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
911                                          const Instruction *Orig) {
912   // If the loop was versioned with memchecks, add the corresponding no-alias
913   // metadata.
914   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
915     LVer->annotateInstWithNoAlias(To, Orig);
916 }
917 
918 void InnerLoopVectorizer::addMetadata(Instruction *To,
919                                       Instruction *From) {
920   propagateMetadata(To, From);
921   addNewMetadata(To, From);
922 }
923 
924 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
925                                       Instruction *From) {
926   for (Value *V : To) {
927     if (Instruction *I = dyn_cast<Instruction>(V))
928       addMetadata(I, From);
929   }
930 }
931 
932 namespace llvm {
933 
934 // Loop vectorization cost-model hints how the scalar epilogue loop should be
935 // lowered.
936 enum ScalarEpilogueLowering {
937 
938   // The default: allowing scalar epilogues.
939   CM_ScalarEpilogueAllowed,
940 
941   // Vectorization with OptForSize: don't allow epilogues.
942   CM_ScalarEpilogueNotAllowedOptSize,
943 
944   // A special case of vectorisation with OptForSize: loops with a very small
945   // trip count are considered for vectorization under OptForSize, thereby
946   // making sure the cost of their loop body is dominant, free of runtime
947   // guards and scalar iteration overheads.
948   CM_ScalarEpilogueNotAllowedLowTripLoop,
949 
950   // Loop hint predicate indicating an epilogue is undesired.
951   CM_ScalarEpilogueNotNeededUsePredicate
952 };
953 
954 /// LoopVectorizationCostModel - estimates the expected speedups due to
955 /// vectorization.
956 /// In many cases vectorization is not profitable. This can happen because of
957 /// a number of reasons. In this class we mainly attempt to predict the
958 /// expected speedup/slowdowns due to the supported instruction set. We use the
959 /// TargetTransformInfo to query the different backends for the cost of
960 /// different operations.
961 class LoopVectorizationCostModel {
962 public:
963   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
964                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
965                              LoopVectorizationLegality *Legal,
966                              const TargetTransformInfo &TTI,
967                              const TargetLibraryInfo *TLI, DemandedBits *DB,
968                              AssumptionCache *AC,
969                              OptimizationRemarkEmitter *ORE, const Function *F,
970                              const LoopVectorizeHints *Hints,
971                              InterleavedAccessInfo &IAI)
972       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
973         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
974         Hints(Hints), InterleaveInfo(IAI) {}
975 
976   /// \return An upper bound for the vectorization factor, or None if
977   /// vectorization and interleaving should be avoided up front.
978   Optional<unsigned> computeMaxVF();
979 
980   /// \return True if runtime checks are required for vectorization, and false
981   /// otherwise.
982   bool runtimeChecksRequired();
983 
984   /// \return The most profitable vectorization factor and the cost of that VF.
985   /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
986   /// then this vectorization factor will be selected if vectorization is
987   /// possible.
988   VectorizationFactor selectVectorizationFactor(unsigned MaxVF);
989 
990   /// Setup cost-based decisions for user vectorization factor.
991   void selectUserVectorizationFactor(unsigned UserVF) {
992     collectUniformsAndScalars(UserVF);
993     collectInstsToScalarize(UserVF);
994   }
995 
996   /// \return The size (in bits) of the smallest and widest types in the code
997   /// that needs to be vectorized. We ignore values that remain scalar such as
998   /// 64 bit loop indices.
999   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1000 
1001   /// \return The desired interleave count.
1002   /// If interleave count has been specified by metadata it will be returned.
1003   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1004   /// are the selected vectorization factor and the cost of the selected VF.
1005   unsigned selectInterleaveCount(unsigned VF, unsigned LoopCost);
1006 
1007   /// Memory access instruction may be vectorized in more than one way.
1008   /// Form of instruction after vectorization depends on cost.
1009   /// This function takes cost-based decisions for Load/Store instructions
1010   /// and collects them in a map. This decisions map is used for building
1011   /// the lists of loop-uniform and loop-scalar instructions.
1012   /// The calculated cost is saved with widening decision in order to
1013   /// avoid redundant calculations.
1014   void setCostBasedWideningDecision(unsigned VF);
1015 
1016   /// A struct that represents some properties of the register usage
1017   /// of a loop.
1018   struct RegisterUsage {
1019     /// Holds the number of loop invariant values that are used in the loop.
1020     /// The key is ClassID of target-provided register class.
1021     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1022     /// Holds the maximum number of concurrent live intervals in the loop.
1023     /// The key is ClassID of target-provided register class.
1024     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1025   };
1026 
1027   /// \return Returns information about the register usages of the loop for the
1028   /// given vectorization factors.
1029   SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1030 
1031   /// Collect values we want to ignore in the cost model.
1032   void collectValuesToIgnore();
1033 
1034   /// \returns The smallest bitwidth each instruction can be represented with.
1035   /// The vector equivalents of these instructions should be truncated to this
1036   /// type.
1037   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1038     return MinBWs;
1039   }
1040 
1041   /// \returns True if it is more profitable to scalarize instruction \p I for
1042   /// vectorization factor \p VF.
1043   bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
1044     assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");
1045 
1046     // Cost model is not run in the VPlan-native path - return conservative
1047     // result until this changes.
1048     if (EnableVPlanNativePath)
1049       return false;
1050 
1051     auto Scalars = InstsToScalarize.find(VF);
1052     assert(Scalars != InstsToScalarize.end() &&
1053            "VF not yet analyzed for scalarization profitability");
1054     return Scalars->second.find(I) != Scalars->second.end();
1055   }
1056 
1057   /// Returns true if \p I is known to be uniform after vectorization.
1058   bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
1059     if (VF == 1)
1060       return true;
1061 
1062     // Cost model is not run in the VPlan-native path - return conservative
1063     // result until this changes.
1064     if (EnableVPlanNativePath)
1065       return false;
1066 
1067     auto UniformsPerVF = Uniforms.find(VF);
1068     assert(UniformsPerVF != Uniforms.end() &&
1069            "VF not yet analyzed for uniformity");
1070     return UniformsPerVF->second.find(I) != UniformsPerVF->second.end();
1071   }
1072 
1073   /// Returns true if \p I is known to be scalar after vectorization.
1074   bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
1075     if (VF == 1)
1076       return true;
1077 
1078     // Cost model is not run in the VPlan-native path - return conservative
1079     // result until this changes.
1080     if (EnableVPlanNativePath)
1081       return false;
1082 
1083     auto ScalarsPerVF = Scalars.find(VF);
1084     assert(ScalarsPerVF != Scalars.end() &&
1085            "Scalar values are not calculated for VF");
1086     return ScalarsPerVF->second.find(I) != ScalarsPerVF->second.end();
1087   }
1088 
1089   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1090   /// for vectorization factor \p VF.
1091   bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
1092     return VF > 1 && MinBWs.find(I) != MinBWs.end() &&
1093            !isProfitableToScalarize(I, VF) &&
1094            !isScalarAfterVectorization(I, VF);
1095   }
1096 
1097   /// Decision that was taken during cost calculation for memory instruction.
1098   enum InstWidening {
1099     CM_Unknown,
1100     CM_Widen,         // For consecutive accesses with stride +1.
1101     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1102     CM_Interleave,
1103     CM_GatherScatter,
1104     CM_Scalarize
1105   };
1106 
1107   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1108   /// instruction \p I and vector width \p VF.
1109   void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
1110                            unsigned Cost) {
1111     assert(VF >= 2 && "Expected VF >=2");
1112     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1113   }
1114 
1115   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1116   /// interleaving group \p Grp and vector width \p VF.
1117   void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF,
1118                            InstWidening W, unsigned Cost) {
1119     assert(VF >= 2 && "Expected VF >=2");
1120     /// Broadcast this decicion to all instructions inside the group.
1121     /// But the cost will be assigned to one instruction only.
1122     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1123       if (auto *I = Grp->getMember(i)) {
1124         if (Grp->getInsertPos() == I)
1125           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1126         else
1127           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1128       }
1129     }
1130   }
1131 
1132   /// Return the cost model decision for the given instruction \p I and vector
1133   /// width \p VF. Return CM_Unknown if this instruction did not pass
1134   /// through the cost modeling.
1135   InstWidening getWideningDecision(Instruction *I, unsigned VF) {
1136     assert(VF >= 2 && "Expected VF >=2");
1137 
1138     // Cost model is not run in the VPlan-native path - return conservative
1139     // result until this changes.
1140     if (EnableVPlanNativePath)
1141       return CM_GatherScatter;
1142 
1143     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1144     auto Itr = WideningDecisions.find(InstOnVF);
1145     if (Itr == WideningDecisions.end())
1146       return CM_Unknown;
1147     return Itr->second.first;
1148   }
1149 
1150   /// Return the vectorization cost for the given instruction \p I and vector
1151   /// width \p VF.
1152   unsigned getWideningCost(Instruction *I, unsigned VF) {
1153     assert(VF >= 2 && "Expected VF >=2");
1154     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1155     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1156            "The cost is not calculated");
1157     return WideningDecisions[InstOnVF].second;
1158   }
1159 
1160   /// Return True if instruction \p I is an optimizable truncate whose operand
1161   /// is an induction variable. Such a truncate will be removed by adding a new
1162   /// induction variable with the destination type.
1163   bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
1164     // If the instruction is not a truncate, return false.
1165     auto *Trunc = dyn_cast<TruncInst>(I);
1166     if (!Trunc)
1167       return false;
1168 
1169     // Get the source and destination types of the truncate.
1170     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1171     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1172 
1173     // If the truncate is free for the given types, return false. Replacing a
1174     // free truncate with an induction variable would add an induction variable
1175     // update instruction to each iteration of the loop. We exclude from this
1176     // check the primary induction variable since it will need an update
1177     // instruction regardless.
1178     Value *Op = Trunc->getOperand(0);
1179     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1180       return false;
1181 
1182     // If the truncated value is not an induction variable, return false.
1183     return Legal->isInductionPhi(Op);
1184   }
1185 
1186   /// Collects the instructions to scalarize for each predicated instruction in
1187   /// the loop.
1188   void collectInstsToScalarize(unsigned VF);
1189 
1190   /// Collect Uniform and Scalar values for the given \p VF.
1191   /// The sets depend on CM decision for Load/Store instructions
1192   /// that may be vectorized as interleave, gather-scatter or scalarized.
1193   void collectUniformsAndScalars(unsigned VF) {
1194     // Do the analysis once.
1195     if (VF == 1 || Uniforms.find(VF) != Uniforms.end())
1196       return;
1197     setCostBasedWideningDecision(VF);
1198     collectLoopUniforms(VF);
1199     collectLoopScalars(VF);
1200   }
1201 
1202   /// Returns true if the target machine supports masked store operation
1203   /// for the given \p DataType and kind of access to \p Ptr.
1204   bool isLegalMaskedStore(Type *DataType, Value *Ptr, MaybeAlign Alignment) {
1205     return Legal->isConsecutivePtr(Ptr) &&
1206            TTI.isLegalMaskedStore(DataType, Alignment);
1207   }
1208 
1209   /// Returns true if the target machine supports masked load operation
1210   /// for the given \p DataType and kind of access to \p Ptr.
1211   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, MaybeAlign Alignment) {
1212     return Legal->isConsecutivePtr(Ptr) &&
1213            TTI.isLegalMaskedLoad(DataType, Alignment);
1214   }
1215 
1216   /// Returns true if the target machine supports masked scatter operation
1217   /// for the given \p DataType.
1218   bool isLegalMaskedScatter(Type *DataType, MaybeAlign Alignment) {
1219     return TTI.isLegalMaskedScatter(DataType, Alignment);
1220   }
1221 
1222   /// Returns true if the target machine supports masked gather operation
1223   /// for the given \p DataType.
1224   bool isLegalMaskedGather(Type *DataType, MaybeAlign Alignment) {
1225     return TTI.isLegalMaskedGather(DataType, Alignment);
1226   }
1227 
1228   /// Returns true if the target machine can represent \p V as a masked gather
1229   /// or scatter operation.
1230   bool isLegalGatherOrScatter(Value *V) {
1231     bool LI = isa<LoadInst>(V);
1232     bool SI = isa<StoreInst>(V);
1233     if (!LI && !SI)
1234       return false;
1235     auto *Ty = getMemInstValueType(V);
1236     MaybeAlign Align = getLoadStoreAlignment(V);
1237     return (LI && isLegalMaskedGather(Ty, Align)) ||
1238            (SI && isLegalMaskedScatter(Ty, Align));
1239   }
1240 
1241   /// Returns true if \p I is an instruction that will be scalarized with
1242   /// predication. Such instructions include conditional stores and
1243   /// instructions that may divide by zero.
1244   /// If a non-zero VF has been calculated, we check if I will be scalarized
1245   /// predication for that VF.
1246   bool isScalarWithPredication(Instruction *I, unsigned VF = 1);
1247 
1248   // Returns true if \p I is an instruction that will be predicated either
1249   // through scalar predication or masked load/store or masked gather/scatter.
1250   // Superset of instructions that return true for isScalarWithPredication.
1251   bool isPredicatedInst(Instruction *I) {
1252     if (!blockNeedsPredication(I->getParent()))
1253       return false;
1254     // Loads and stores that need some form of masked operation are predicated
1255     // instructions.
1256     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1257       return Legal->isMaskRequired(I);
1258     return isScalarWithPredication(I);
1259   }
1260 
1261   /// Returns true if \p I is a memory instruction with consecutive memory
1262   /// access that can be widened.
1263   bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);
1264 
1265   /// Returns true if \p I is a memory instruction in an interleaved-group
1266   /// of memory accesses that can be vectorized with wide vector loads/stores
1267   /// and shuffles.
1268   bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1);
1269 
1270   /// Check if \p Instr belongs to any interleaved access group.
1271   bool isAccessInterleaved(Instruction *Instr) {
1272     return InterleaveInfo.isInterleaved(Instr);
1273   }
1274 
1275   /// Get the interleaved access group that \p Instr belongs to.
1276   const InterleaveGroup<Instruction> *
1277   getInterleavedAccessGroup(Instruction *Instr) {
1278     return InterleaveInfo.getInterleaveGroup(Instr);
1279   }
1280 
1281   /// Returns true if an interleaved group requires a scalar iteration
1282   /// to handle accesses with gaps, and there is nothing preventing us from
1283   /// creating a scalar epilogue.
1284   bool requiresScalarEpilogue() const {
1285     return isScalarEpilogueAllowed() && InterleaveInfo.requiresScalarEpilogue();
1286   }
1287 
1288   /// Returns true if a scalar epilogue is not allowed due to optsize or a
1289   /// loop hint annotation.
1290   bool isScalarEpilogueAllowed() const {
1291     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1292   }
1293 
1294   /// Returns true if all loop blocks should be masked to fold tail loop.
1295   bool foldTailByMasking() const { return FoldTailByMasking; }
1296 
1297   bool blockNeedsPredication(BasicBlock *BB) {
1298     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1299   }
1300 
1301   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1302   /// with factor VF.  Return the cost of the instruction, including
1303   /// scalarization overhead if it's needed.
1304   unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF);
1305 
1306   /// Estimate cost of a call instruction CI if it were vectorized with factor
1307   /// VF. Return the cost of the instruction, including scalarization overhead
1308   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1309   /// scalarized -
1310   /// i.e. either vector version isn't available, or is too expensive.
1311   unsigned getVectorCallCost(CallInst *CI, unsigned VF, bool &NeedToScalarize);
1312 
1313   /// Invalidates decisions already taken by the cost model.
1314   void invalidateCostModelingDecisions() {
1315     WideningDecisions.clear();
1316     Uniforms.clear();
1317     Scalars.clear();
1318   }
1319 
1320 private:
1321   unsigned NumPredStores = 0;
1322 
1323   /// \return An upper bound for the vectorization factor, larger than zero.
1324   /// One is returned if vectorization should best be avoided due to cost.
1325   unsigned computeFeasibleMaxVF(unsigned ConstTripCount);
1326 
1327   /// The vectorization cost is a combination of the cost itself and a boolean
1328   /// indicating whether any of the contributing operations will actually
1329   /// operate on
1330   /// vector values after type legalization in the backend. If this latter value
1331   /// is
1332   /// false, then all operations will be scalarized (i.e. no vectorization has
1333   /// actually taken place).
1334   using VectorizationCostTy = std::pair<unsigned, bool>;
1335 
1336   /// Returns the expected execution cost. The unit of the cost does
1337   /// not matter because we use the 'cost' units to compare different
1338   /// vector widths. The cost that is returned is *not* normalized by
1339   /// the factor width.
1340   VectorizationCostTy expectedCost(unsigned VF);
1341 
1342   /// Returns the execution time cost of an instruction for a given vector
1343   /// width. Vector width of one means scalar.
1344   VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
1345 
1346   /// The cost-computation logic from getInstructionCost which provides
1347   /// the vector type as an output parameter.
1348   unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
1349 
1350   /// Calculate vectorization cost of memory instruction \p I.
1351   unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);
1352 
1353   /// The cost computation for scalarized memory instruction.
1354   unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);
1355 
1356   /// The cost computation for interleaving group of memory instructions.
1357   unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);
1358 
1359   /// The cost computation for Gather/Scatter instruction.
1360   unsigned getGatherScatterCost(Instruction *I, unsigned VF);
1361 
1362   /// The cost computation for widening instruction \p I with consecutive
1363   /// memory access.
1364   unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
1365 
1366   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1367   /// Load: scalar load + broadcast.
1368   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1369   /// element)
1370   unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
1371 
1372   /// Estimate the overhead of scalarizing an instruction. This is a
1373   /// convenience wrapper for the type-based getScalarizationOverhead API.
1374   unsigned getScalarizationOverhead(Instruction *I, unsigned VF);
1375 
1376   /// Returns whether the instruction is a load or store and will be a emitted
1377   /// as a vector operation.
1378   bool isConsecutiveLoadOrStore(Instruction *I);
1379 
1380   /// Returns true if an artificially high cost for emulated masked memrefs
1381   /// should be used.
1382   bool useEmulatedMaskMemRefHack(Instruction *I);
1383 
1384   /// Map of scalar integer values to the smallest bitwidth they can be legally
1385   /// represented as. The vector equivalents of these values should be truncated
1386   /// to this type.
1387   MapVector<Instruction *, uint64_t> MinBWs;
1388 
1389   /// A type representing the costs for instructions if they were to be
1390   /// scalarized rather than vectorized. The entries are Instruction-Cost
1391   /// pairs.
1392   using ScalarCostsTy = DenseMap<Instruction *, unsigned>;
1393 
1394   /// A set containing all BasicBlocks that are known to present after
1395   /// vectorization as a predicated block.
1396   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1397 
1398   /// Records whether it is allowed to have the original scalar loop execute at
1399   /// least once. This may be needed as a fallback loop in case runtime
1400   /// aliasing/dependence checks fail, or to handle the tail/remainder
1401   /// iterations when the trip count is unknown or doesn't divide by the VF,
1402   /// or as a peel-loop to handle gaps in interleave-groups.
1403   /// Under optsize and when the trip count is very small we don't allow any
1404   /// iterations to execute in the scalar loop.
1405   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1406 
1407   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1408   bool FoldTailByMasking = false;
1409 
1410   /// A map holding scalar costs for different vectorization factors. The
1411   /// presence of a cost for an instruction in the mapping indicates that the
1412   /// instruction will be scalarized when vectorizing with the associated
1413   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1414   DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;
1415 
1416   /// Holds the instructions known to be uniform after vectorization.
1417   /// The data is collected per VF.
1418   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;
1419 
1420   /// Holds the instructions known to be scalar after vectorization.
1421   /// The data is collected per VF.
1422   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;
1423 
1424   /// Holds the instructions (address computations) that are forced to be
1425   /// scalarized.
1426   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1427 
1428   /// Returns the expected difference in cost from scalarizing the expression
1429   /// feeding a predicated instruction \p PredInst. The instructions to
1430   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1431   /// non-negative return value implies the expression will be scalarized.
1432   /// Currently, only single-use chains are considered for scalarization.
1433   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1434                               unsigned VF);
1435 
1436   /// Collect the instructions that are uniform after vectorization. An
1437   /// instruction is uniform if we represent it with a single scalar value in
1438   /// the vectorized loop corresponding to each vector iteration. Examples of
1439   /// uniform instructions include pointer operands of consecutive or
1440   /// interleaved memory accesses. Note that although uniformity implies an
1441   /// instruction will be scalar, the reverse is not true. In general, a
1442   /// scalarized instruction will be represented by VF scalar values in the
1443   /// vectorized loop, each corresponding to an iteration of the original
1444   /// scalar loop.
1445   void collectLoopUniforms(unsigned VF);
1446 
1447   /// Collect the instructions that are scalar after vectorization. An
1448   /// instruction is scalar if it is known to be uniform or will be scalarized
1449   /// during vectorization. Non-uniform scalarized instructions will be
1450   /// represented by VF values in the vectorized loop, each corresponding to an
1451   /// iteration of the original scalar loop.
1452   void collectLoopScalars(unsigned VF);
1453 
1454   /// Keeps cost model vectorization decision and cost for instructions.
1455   /// Right now it is used for memory instructions only.
1456   using DecisionList = DenseMap<std::pair<Instruction *, unsigned>,
1457                                 std::pair<InstWidening, unsigned>>;
1458 
1459   DecisionList WideningDecisions;
1460 
1461   /// Returns true if \p V is expected to be vectorized and it needs to be
1462   /// extracted.
1463   bool needsExtract(Value *V, unsigned VF) const {
1464     Instruction *I = dyn_cast<Instruction>(V);
1465     if (VF == 1 || !I || !TheLoop->contains(I) || TheLoop->isLoopInvariant(I))
1466       return false;
1467 
1468     // Assume we can vectorize V (and hence we need extraction) if the
1469     // scalars are not computed yet. This can happen, because it is called
1470     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1471     // the scalars are collected. That should be a safe assumption in most
1472     // cases, because we check if the operands have vectorizable types
1473     // beforehand in LoopVectorizationLegality.
1474     return Scalars.find(VF) == Scalars.end() ||
1475            !isScalarAfterVectorization(I, VF);
1476   };
1477 
1478   /// Returns a range containing only operands needing to be extracted.
1479   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1480                                                    unsigned VF) {
1481     return SmallVector<Value *, 4>(make_filter_range(
1482         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1483   }
1484 
1485 public:
1486   /// The loop that we evaluate.
1487   Loop *TheLoop;
1488 
1489   /// Predicated scalar evolution analysis.
1490   PredicatedScalarEvolution &PSE;
1491 
1492   /// Loop Info analysis.
1493   LoopInfo *LI;
1494 
1495   /// Vectorization legality.
1496   LoopVectorizationLegality *Legal;
1497 
1498   /// Vector target information.
1499   const TargetTransformInfo &TTI;
1500 
1501   /// Target Library Info.
1502   const TargetLibraryInfo *TLI;
1503 
1504   /// Demanded bits analysis.
1505   DemandedBits *DB;
1506 
1507   /// Assumption cache.
1508   AssumptionCache *AC;
1509 
1510   /// Interface to emit optimization remarks.
1511   OptimizationRemarkEmitter *ORE;
1512 
1513   const Function *TheFunction;
1514 
1515   /// Loop Vectorize Hint.
1516   const LoopVectorizeHints *Hints;
1517 
1518   /// The interleave access information contains groups of interleaved accesses
1519   /// with the same stride and close to each other.
1520   InterleavedAccessInfo &InterleaveInfo;
1521 
1522   /// Values to ignore in the cost model.
1523   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1524 
1525   /// Values to ignore in the cost model when VF > 1.
1526   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1527 };
1528 
1529 } // end namespace llvm
1530 
1531 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
1532 // vectorization. The loop needs to be annotated with #pragma omp simd
1533 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
1534 // vector length information is not provided, vectorization is not considered
1535 // explicit. Interleave hints are not allowed either. These limitations will be
1536 // relaxed in the future.
1537 // Please, note that we are currently forced to abuse the pragma 'clang
1538 // vectorize' semantics. This pragma provides *auto-vectorization hints*
1539 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1540 // provides *explicit vectorization hints* (LV can bypass legal checks and
1541 // assume that vectorization is legal). However, both hints are implemented
1542 // using the same metadata (llvm.loop.vectorize, processed by
1543 // LoopVectorizeHints). This will be fixed in the future when the native IR
1544 // representation for pragma 'omp simd' is introduced.
1545 static bool isExplicitVecOuterLoop(Loop *OuterLp,
1546                                    OptimizationRemarkEmitter *ORE) {
1547   assert(!OuterLp->empty() && "This is not an outer loop");
1548   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1549 
1550   // Only outer loops with an explicit vectorization hint are supported.
1551   // Unannotated outer loops are ignored.
1552   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1553     return false;
1554 
1555   Function *Fn = OuterLp->getHeader()->getParent();
1556   if (!Hints.allowVectorization(Fn, OuterLp,
1557                                 true /*VectorizeOnlyWhenForced*/)) {
1558     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1559     return false;
1560   }
1561 
1562   if (Hints.getInterleave() > 1) {
1563     // TODO: Interleave support is future work.
1564     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1565                          "outer loops.\n");
1566     Hints.emitRemarkWithHints();
1567     return false;
1568   }
1569 
1570   return true;
1571 }
1572 
1573 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
1574                                   OptimizationRemarkEmitter *ORE,
1575                                   SmallVectorImpl<Loop *> &V) {
1576   // Collect inner loops and outer loops without irreducible control flow. For
1577   // now, only collect outer loops that have explicit vectorization hints. If we
1578   // are stress testing the VPlan H-CFG construction, we collect the outermost
1579   // loop of every loop nest.
1580   if (L.empty() || VPlanBuildStressTest ||
1581       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
1582     LoopBlocksRPO RPOT(&L);
1583     RPOT.perform(LI);
1584     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
1585       V.push_back(&L);
1586       // TODO: Collect inner loops inside marked outer loops in case
1587       // vectorization fails for the outer loop. Do not invoke
1588       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1589       // already known to be reducible. We can use an inherited attribute for
1590       // that.
1591       return;
1592     }
1593   }
1594   for (Loop *InnerL : L)
1595     collectSupportedLoops(*InnerL, LI, ORE, V);
1596 }
1597 
1598 namespace {
1599 
1600 /// The LoopVectorize Pass.
1601 struct LoopVectorize : public FunctionPass {
1602   /// Pass identification, replacement for typeid
1603   static char ID;
1604 
1605   LoopVectorizePass Impl;
1606 
1607   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
1608                          bool VectorizeOnlyWhenForced = false)
1609       : FunctionPass(ID),
1610         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
1611     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
1612   }
1613 
1614   bool runOnFunction(Function &F) override {
1615     if (skipFunction(F))
1616       return false;
1617 
1618     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1619     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1620     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1621     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1622     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
1623     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
1624     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
1625     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1626     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1627     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
1628     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
1629     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
1630     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
1631 
1632     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
1633         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
1634 
1635     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
1636                         GetLAA, *ORE, PSI).MadeAnyChange;
1637   }
1638 
1639   void getAnalysisUsage(AnalysisUsage &AU) const override {
1640     AU.addRequired<AssumptionCacheTracker>();
1641     AU.addRequired<BlockFrequencyInfoWrapperPass>();
1642     AU.addRequired<DominatorTreeWrapperPass>();
1643     AU.addRequired<LoopInfoWrapperPass>();
1644     AU.addRequired<ScalarEvolutionWrapperPass>();
1645     AU.addRequired<TargetTransformInfoWrapperPass>();
1646     AU.addRequired<AAResultsWrapperPass>();
1647     AU.addRequired<LoopAccessLegacyAnalysis>();
1648     AU.addRequired<DemandedBitsWrapperPass>();
1649     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
1650     AU.addRequired<InjectTLIMappingsLegacy>();
1651 
1652     // We currently do not preserve loopinfo/dominator analyses with outer loop
1653     // vectorization. Until this is addressed, mark these analyses as preserved
1654     // only for non-VPlan-native path.
1655     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
1656     if (!EnableVPlanNativePath) {
1657       AU.addPreserved<LoopInfoWrapperPass>();
1658       AU.addPreserved<DominatorTreeWrapperPass>();
1659     }
1660 
1661     AU.addPreserved<BasicAAWrapperPass>();
1662     AU.addPreserved<GlobalsAAWrapperPass>();
1663     AU.addRequired<ProfileSummaryInfoWrapperPass>();
1664   }
1665 };
1666 
1667 } // end anonymous namespace
1668 
1669 //===----------------------------------------------------------------------===//
1670 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
1671 // LoopVectorizationCostModel and LoopVectorizationPlanner.
1672 //===----------------------------------------------------------------------===//
1673 
1674 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
1675   // We need to place the broadcast of invariant variables outside the loop,
1676   // but only if it's proven safe to do so. Else, broadcast will be inside
1677   // vector loop body.
1678   Instruction *Instr = dyn_cast<Instruction>(V);
1679   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
1680                      (!Instr ||
1681                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
1682   // Place the code for broadcasting invariant variables in the new preheader.
1683   IRBuilder<>::InsertPointGuard Guard(Builder);
1684   if (SafeToHoist)
1685     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1686 
1687   // Broadcast the scalar into all locations in the vector.
1688   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
1689 
1690   return Shuf;
1691 }
1692 
1693 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
1694     const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
1695   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1696          "Expected either an induction phi-node or a truncate of it!");
1697   Value *Start = II.getStartValue();
1698 
1699   // Construct the initial value of the vector IV in the vector loop preheader
1700   auto CurrIP = Builder.saveIP();
1701   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1702   if (isa<TruncInst>(EntryVal)) {
1703     assert(Start->getType()->isIntegerTy() &&
1704            "Truncation requires an integer type");
1705     auto *TruncType = cast<IntegerType>(EntryVal->getType());
1706     Step = Builder.CreateTrunc(Step, TruncType);
1707     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
1708   }
1709   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
1710   Value *SteppedStart =
1711       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
1712 
1713   // We create vector phi nodes for both integer and floating-point induction
1714   // variables. Here, we determine the kind of arithmetic we will perform.
1715   Instruction::BinaryOps AddOp;
1716   Instruction::BinaryOps MulOp;
1717   if (Step->getType()->isIntegerTy()) {
1718     AddOp = Instruction::Add;
1719     MulOp = Instruction::Mul;
1720   } else {
1721     AddOp = II.getInductionOpcode();
1722     MulOp = Instruction::FMul;
1723   }
1724 
1725   // Multiply the vectorization factor by the step using integer or
1726   // floating-point arithmetic as appropriate.
1727   Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF);
1728   Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
1729 
1730   // Create a vector splat to use in the induction update.
1731   //
1732   // FIXME: If the step is non-constant, we create the vector splat with
1733   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
1734   //        handle a constant vector splat.
1735   Value *SplatVF =
1736       isa<Constant>(Mul)
1737           ? ConstantVector::getSplat({VF, false}, cast<Constant>(Mul))
1738           : Builder.CreateVectorSplat(VF, Mul);
1739   Builder.restoreIP(CurrIP);
1740 
1741   // We may need to add the step a number of times, depending on the unroll
1742   // factor. The last of those goes into the PHI.
1743   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
1744                                     &*LoopVectorBody->getFirstInsertionPt());
1745   VecInd->setDebugLoc(EntryVal->getDebugLoc());
1746   Instruction *LastInduction = VecInd;
1747   for (unsigned Part = 0; Part < UF; ++Part) {
1748     VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);
1749 
1750     if (isa<TruncInst>(EntryVal))
1751       addMetadata(LastInduction, EntryVal);
1752     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part);
1753 
1754     LastInduction = cast<Instruction>(addFastMathFlag(
1755         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
1756     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
1757   }
1758 
1759   // Move the last step to the end of the latch block. This ensures consistent
1760   // placement of all induction updates.
1761   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
1762   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
1763   auto *ICmp = cast<Instruction>(Br->getCondition());
1764   LastInduction->moveBefore(ICmp);
1765   LastInduction->setName("vec.ind.next");
1766 
1767   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
1768   VecInd->addIncoming(LastInduction, LoopVectorLatch);
1769 }
1770 
1771 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
1772   return Cost->isScalarAfterVectorization(I, VF) ||
1773          Cost->isProfitableToScalarize(I, VF);
1774 }
1775 
1776 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
1777   if (shouldScalarizeInstruction(IV))
1778     return true;
1779   auto isScalarInst = [&](User *U) -> bool {
1780     auto *I = cast<Instruction>(U);
1781     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
1782   };
1783   return llvm::any_of(IV->users(), isScalarInst);
1784 }
1785 
1786 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
1787     const InductionDescriptor &ID, const Instruction *EntryVal,
1788     Value *VectorLoopVal, unsigned Part, unsigned Lane) {
1789   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1790          "Expected either an induction phi-node or a truncate of it!");
1791 
1792   // This induction variable is not the phi from the original loop but the
1793   // newly-created IV based on the proof that casted Phi is equal to the
1794   // uncasted Phi in the vectorized loop (under a runtime guard possibly). It
1795   // re-uses the same InductionDescriptor that original IV uses but we don't
1796   // have to do any recording in this case - that is done when original IV is
1797   // processed.
1798   if (isa<TruncInst>(EntryVal))
1799     return;
1800 
1801   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
1802   if (Casts.empty())
1803     return;
1804   // Only the first Cast instruction in the Casts vector is of interest.
1805   // The rest of the Casts (if exist) have no uses outside the
1806   // induction update chain itself.
1807   Instruction *CastInst = *Casts.begin();
1808   if (Lane < UINT_MAX)
1809     VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal);
1810   else
1811     VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal);
1812 }
1813 
1814 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
1815   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
1816          "Primary induction variable must have an integer type");
1817 
1818   auto II = Legal->getInductionVars().find(IV);
1819   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
1820 
1821   auto ID = II->second;
1822   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
1823 
1824   // The value from the original loop to which we are mapping the new induction
1825   // variable.
1826   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
1827 
1828   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
1829 
1830   // Generate code for the induction step. Note that induction steps are
1831   // required to be loop-invariant
1832   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
1833     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
1834            "Induction step should be loop invariant");
1835     if (PSE.getSE()->isSCEVable(IV->getType())) {
1836       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
1837       return Exp.expandCodeFor(Step, Step->getType(),
1838                                LoopVectorPreHeader->getTerminator());
1839     }
1840     return cast<SCEVUnknown>(Step)->getValue();
1841   };
1842 
1843   // The scalar value to broadcast. This is derived from the canonical
1844   // induction variable. If a truncation type is given, truncate the canonical
1845   // induction variable and step. Otherwise, derive these values from the
1846   // induction descriptor.
1847   auto CreateScalarIV = [&](Value *&Step) -> Value * {
1848     Value *ScalarIV = Induction;
1849     if (IV != OldInduction) {
1850       ScalarIV = IV->getType()->isIntegerTy()
1851                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
1852                      : Builder.CreateCast(Instruction::SIToFP, Induction,
1853                                           IV->getType());
1854       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
1855       ScalarIV->setName("offset.idx");
1856     }
1857     if (Trunc) {
1858       auto *TruncType = cast<IntegerType>(Trunc->getType());
1859       assert(Step->getType()->isIntegerTy() &&
1860              "Truncation requires an integer step");
1861       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
1862       Step = Builder.CreateTrunc(Step, TruncType);
1863     }
1864     return ScalarIV;
1865   };
1866 
1867   // Create the vector values from the scalar IV, in the absence of creating a
1868   // vector IV.
1869   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
1870     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
1871     for (unsigned Part = 0; Part < UF; ++Part) {
1872       Value *EntryPart =
1873           getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode());
1874       VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart);
1875       if (Trunc)
1876         addMetadata(EntryPart, Trunc);
1877       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part);
1878     }
1879   };
1880 
1881   // Now do the actual transformations, and start with creating the step value.
1882   Value *Step = CreateStepValue(ID.getStep());
1883   if (VF <= 1) {
1884     Value *ScalarIV = CreateScalarIV(Step);
1885     CreateSplatIV(ScalarIV, Step);
1886     return;
1887   }
1888 
1889   // Determine if we want a scalar version of the induction variable. This is
1890   // true if the induction variable itself is not widened, or if it has at
1891   // least one user in the loop that is not widened.
1892   auto NeedsScalarIV = needsScalarInduction(EntryVal);
1893   if (!NeedsScalarIV) {
1894     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
1895     return;
1896   }
1897 
1898   // Try to create a new independent vector induction variable. If we can't
1899   // create the phi node, we will splat the scalar induction variable in each
1900   // loop iteration.
1901   if (!shouldScalarizeInstruction(EntryVal)) {
1902     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
1903     Value *ScalarIV = CreateScalarIV(Step);
1904     // Create scalar steps that can be used by instructions we will later
1905     // scalarize. Note that the addition of the scalar steps will not increase
1906     // the number of instructions in the loop in the common case prior to
1907     // InstCombine. We will be trading one vector extract for each scalar step.
1908     buildScalarSteps(ScalarIV, Step, EntryVal, ID);
1909     return;
1910   }
1911 
1912   // If we haven't yet vectorized the induction variable, splat the scalar
1913   // induction variable, and build the necessary step vectors.
1914   // TODO: Don't do it unless the vectorized IV is really required.
1915   Value *ScalarIV = CreateScalarIV(Step);
1916   CreateSplatIV(ScalarIV, Step);
1917   buildScalarSteps(ScalarIV, Step, EntryVal, ID);
1918 }
1919 
1920 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
1921                                           Instruction::BinaryOps BinOp) {
1922   // Create and check the types.
1923   auto *ValVTy = cast<VectorType>(Val->getType());
1924   int VLen = ValVTy->getNumElements();
1925 
1926   Type *STy = Val->getType()->getScalarType();
1927   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
1928          "Induction Step must be an integer or FP");
1929   assert(Step->getType() == STy && "Step has wrong type");
1930 
1931   SmallVector<Constant *, 8> Indices;
1932 
1933   if (STy->isIntegerTy()) {
1934     // Create a vector of consecutive numbers from zero to VF.
1935     for (int i = 0; i < VLen; ++i)
1936       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
1937 
1938     // Add the consecutive indices to the vector value.
1939     Constant *Cv = ConstantVector::get(Indices);
1940     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
1941     Step = Builder.CreateVectorSplat(VLen, Step);
1942     assert(Step->getType() == Val->getType() && "Invalid step vec");
1943     // FIXME: The newly created binary instructions should contain nsw/nuw flags,
1944     // which can be found from the original scalar operations.
1945     Step = Builder.CreateMul(Cv, Step);
1946     return Builder.CreateAdd(Val, Step, "induction");
1947   }
1948 
1949   // Floating point induction.
1950   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
1951          "Binary Opcode should be specified for FP induction");
1952   // Create a vector of consecutive numbers from zero to VF.
1953   for (int i = 0; i < VLen; ++i)
1954     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
1955 
1956   // Add the consecutive indices to the vector value.
1957   Constant *Cv = ConstantVector::get(Indices);
1958 
1959   Step = Builder.CreateVectorSplat(VLen, Step);
1960 
1961   // Floating point operations had to be 'fast' to enable the induction.
1962   FastMathFlags Flags;
1963   Flags.setFast();
1964 
1965   Value *MulOp = Builder.CreateFMul(Cv, Step);
1966   if (isa<Instruction>(MulOp))
1967     // Have to check, MulOp may be a constant
1968     cast<Instruction>(MulOp)->setFastMathFlags(Flags);
1969 
1970   Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
1971   if (isa<Instruction>(BOp))
1972     cast<Instruction>(BOp)->setFastMathFlags(Flags);
1973   return BOp;
1974 }
1975 
1976 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
1977                                            Instruction *EntryVal,
1978                                            const InductionDescriptor &ID) {
1979   // We shouldn't have to build scalar steps if we aren't vectorizing.
1980   assert(VF > 1 && "VF should be greater than one");
1981 
1982   // Get the value type and ensure it and the step have the same integer type.
1983   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
1984   assert(ScalarIVTy == Step->getType() &&
1985          "Val and Step should have the same type");
1986 
1987   // We build scalar steps for both integer and floating-point induction
1988   // variables. Here, we determine the kind of arithmetic we will perform.
1989   Instruction::BinaryOps AddOp;
1990   Instruction::BinaryOps MulOp;
1991   if (ScalarIVTy->isIntegerTy()) {
1992     AddOp = Instruction::Add;
1993     MulOp = Instruction::Mul;
1994   } else {
1995     AddOp = ID.getInductionOpcode();
1996     MulOp = Instruction::FMul;
1997   }
1998 
1999   // Determine the number of scalars we need to generate for each unroll
2000   // iteration. If EntryVal is uniform, we only need to generate the first
2001   // lane. Otherwise, we generate all VF values.
2002   unsigned Lanes =
2003       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1
2004                                                                          : VF;
2005   // Compute the scalar steps and save the results in VectorLoopValueMap.
2006   for (unsigned Part = 0; Part < UF; ++Part) {
2007     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2008       auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
2009       auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
2010       auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
2011       VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add);
2012       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane);
2013     }
2014   }
2015 }
2016 
2017 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
2018   assert(V != Induction && "The new induction variable should not be used.");
2019   assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2020   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2021 
2022   // If we have a stride that is replaced by one, do it here. Defer this for
2023   // the VPlan-native path until we start running Legal checks in that path.
2024   if (!EnableVPlanNativePath && Legal->hasStride(V))
2025     V = ConstantInt::get(V->getType(), 1);
2026 
2027   // If we have a vector mapped to this value, return it.
2028   if (VectorLoopValueMap.hasVectorValue(V, Part))
2029     return VectorLoopValueMap.getVectorValue(V, Part);
2030 
2031   // If the value has not been vectorized, check if it has been scalarized
2032   // instead. If it has been scalarized, and we actually need the value in
2033   // vector form, we will construct the vector values on demand.
2034   if (VectorLoopValueMap.hasAnyScalarValue(V)) {
2035     Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0});
2036 
2037     // If we've scalarized a value, that value should be an instruction.
2038     auto *I = cast<Instruction>(V);
2039 
2040     // If we aren't vectorizing, we can just copy the scalar map values over to
2041     // the vector map.
2042     if (VF == 1) {
2043       VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
2044       return ScalarValue;
2045     }
2046 
2047     // Get the last scalar instruction we generated for V and Part. If the value
2048     // is known to be uniform after vectorization, this corresponds to lane zero
2049     // of the Part unroll iteration. Otherwise, the last instruction is the one
2050     // we created for the last vector lane of the Part unroll iteration.
2051     unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
2052     auto *LastInst = cast<Instruction>(
2053         VectorLoopValueMap.getScalarValue(V, {Part, LastLane}));
2054 
2055     // Set the insert point after the last scalarized instruction. This ensures
2056     // the insertelement sequence will directly follow the scalar definitions.
2057     auto OldIP = Builder.saveIP();
2058     auto NewIP = std::next(BasicBlock::iterator(LastInst));
2059     Builder.SetInsertPoint(&*NewIP);
2060 
2061     // However, if we are vectorizing, we need to construct the vector values.
2062     // If the value is known to be uniform after vectorization, we can just
2063     // broadcast the scalar value corresponding to lane zero for each unroll
2064     // iteration. Otherwise, we construct the vector values using insertelement
2065     // instructions. Since the resulting vectors are stored in
2066     // VectorLoopValueMap, we will only generate the insertelements once.
2067     Value *VectorValue = nullptr;
2068     if (Cost->isUniformAfterVectorization(I, VF)) {
2069       VectorValue = getBroadcastInstrs(ScalarValue);
2070       VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
2071     } else {
2072       // Initialize packing with insertelements to start from undef.
2073       Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF));
2074       VectorLoopValueMap.setVectorValue(V, Part, Undef);
2075       for (unsigned Lane = 0; Lane < VF; ++Lane)
2076         packScalarIntoVectorValue(V, {Part, Lane});
2077       VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
2078     }
2079     Builder.restoreIP(OldIP);
2080     return VectorValue;
2081   }
2082 
2083   // If this scalar is unknown, assume that it is a constant or that it is
2084   // loop invariant. Broadcast V and save the value for future uses.
2085   Value *B = getBroadcastInstrs(V);
2086   VectorLoopValueMap.setVectorValue(V, Part, B);
2087   return B;
2088 }
2089 
2090 Value *
2091 InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
2092                                             const VPIteration &Instance) {
2093   // If the value is not an instruction contained in the loop, it should
2094   // already be scalar.
2095   if (OrigLoop->isLoopInvariant(V))
2096     return V;
2097 
2098   assert(Instance.Lane > 0
2099              ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)
2100              : true && "Uniform values only have lane zero");
2101 
2102   // If the value from the original loop has not been vectorized, it is
2103   // represented by UF x VF scalar values in the new loop. Return the requested
2104   // scalar value.
2105   if (VectorLoopValueMap.hasScalarValue(V, Instance))
2106     return VectorLoopValueMap.getScalarValue(V, Instance);
2107 
2108   // If the value has not been scalarized, get its entry in VectorLoopValueMap
2109   // for the given unroll part. If this entry is not a vector type (i.e., the
2110   // vectorization factor is one), there is no need to generate an
2111   // extractelement instruction.
2112   auto *U = getOrCreateVectorValue(V, Instance.Part);
2113   if (!U->getType()->isVectorTy()) {
2114     assert(VF == 1 && "Value not scalarized has non-vector type");
2115     return U;
2116   }
2117 
2118   // Otherwise, the value from the original loop has been vectorized and is
2119   // represented by UF vector values. Extract and return the requested scalar
2120   // value from the appropriate vector lane.
2121   return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
2122 }
2123 
2124 void InnerLoopVectorizer::packScalarIntoVectorValue(
2125     Value *V, const VPIteration &Instance) {
2126   assert(V != Induction && "The new induction variable should not be used.");
2127   assert(!V->getType()->isVectorTy() && "Can't pack a vector");
2128   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2129 
2130   Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
2131   Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
2132   VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
2133                                             Builder.getInt32(Instance.Lane));
2134   VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
2135 }
2136 
2137 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2138   assert(Vec->getType()->isVectorTy() && "Invalid type");
2139   SmallVector<int, 8> ShuffleMask;
2140   for (unsigned i = 0; i < VF; ++i)
2141     ShuffleMask.push_back(VF - i - 1);
2142 
2143   return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
2144                                      ShuffleMask, "reverse");
2145 }
2146 
2147 // Return whether we allow using masked interleave-groups (for dealing with
2148 // strided loads/stores that reside in predicated blocks, or for dealing
2149 // with gaps).
2150 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2151   // If an override option has been passed in for interleaved accesses, use it.
2152   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2153     return EnableMaskedInterleavedMemAccesses;
2154 
2155   return TTI.enableMaskedInterleavedAccessVectorization();
2156 }
2157 
2158 // Try to vectorize the interleave group that \p Instr belongs to.
2159 //
2160 // E.g. Translate following interleaved load group (factor = 3):
2161 //   for (i = 0; i < N; i+=3) {
2162 //     R = Pic[i];             // Member of index 0
2163 //     G = Pic[i+1];           // Member of index 1
2164 //     B = Pic[i+2];           // Member of index 2
2165 //     ... // do something to R, G, B
2166 //   }
2167 // To:
2168 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2169 //   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
2170 //   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
2171 //   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
2172 //
2173 // Or translate following interleaved store group (factor = 3):
2174 //   for (i = 0; i < N; i+=3) {
2175 //     ... do something to R, G, B
2176 //     Pic[i]   = R;           // Member of index 0
2177 //     Pic[i+1] = G;           // Member of index 1
2178 //     Pic[i+2] = B;           // Member of index 2
2179 //   }
2180 // To:
2181 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2182 //   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2183 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2184 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2185 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2186 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2187     const InterleaveGroup<Instruction> *Group, VPTransformState &State,
2188     VPValue *Addr, VPValue *BlockInMask) {
2189   Instruction *Instr = Group->getInsertPos();
2190   const DataLayout &DL = Instr->getModule()->getDataLayout();
2191 
2192   // Prepare for the vector type of the interleaved load/store.
2193   Type *ScalarTy = getMemInstValueType(Instr);
2194   unsigned InterleaveFactor = Group->getFactor();
2195   Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
2196 
2197   // Prepare for the new pointers.
2198   SmallVector<Value *, 2> AddrParts;
2199   unsigned Index = Group->getIndex(Instr);
2200 
2201   // TODO: extend the masked interleaved-group support to reversed access.
2202   assert((!BlockInMask || !Group->isReverse()) &&
2203          "Reversed masked interleave-group not supported.");
2204 
2205   // If the group is reverse, adjust the index to refer to the last vector lane
2206   // instead of the first. We adjust the index from the first vector lane,
2207   // rather than directly getting the pointer for lane VF - 1, because the
2208   // pointer operand of the interleaved access is supposed to be uniform. For
2209   // uniform instructions, we're only required to generate a value for the
2210   // first vector lane in each unroll iteration.
2211   if (Group->isReverse())
2212     Index += (VF - 1) * Group->getFactor();
2213 
2214   for (unsigned Part = 0; Part < UF; Part++) {
2215     Value *AddrPart = State.get(Addr, {Part, 0});
2216     setDebugLocFromInst(Builder, AddrPart);
2217 
2218     // Notice current instruction could be any index. Need to adjust the address
2219     // to the member of index 0.
2220     //
2221     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2222     //       b = A[i];       // Member of index 0
2223     // Current pointer is pointed to A[i+1], adjust it to A[i].
2224     //
2225     // E.g.  A[i+1] = a;     // Member of index 1
2226     //       A[i]   = b;     // Member of index 0
2227     //       A[i+2] = c;     // Member of index 2 (Current instruction)
2228     // Current pointer is pointed to A[i+2], adjust it to A[i].
2229 
2230     bool InBounds = false;
2231     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2232       InBounds = gep->isInBounds();
2233     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2234     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2235 
2236     // Cast to the vector pointer type.
2237     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2238     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2239     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2240   }
2241 
2242   setDebugLocFromInst(Builder, Instr);
2243   Value *UndefVec = UndefValue::get(VecTy);
2244 
2245   Value *MaskForGaps = nullptr;
2246   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2247     MaskForGaps = createBitMaskForGaps(Builder, VF, *Group);
2248     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2249   }
2250 
2251   // Vectorize the interleaved load group.
2252   if (isa<LoadInst>(Instr)) {
2253     // For each unroll part, create a wide load for the group.
2254     SmallVector<Value *, 2> NewLoads;
2255     for (unsigned Part = 0; Part < UF; Part++) {
2256       Instruction *NewLoad;
2257       if (BlockInMask || MaskForGaps) {
2258         assert(useMaskedInterleavedAccesses(*TTI) &&
2259                "masked interleaved groups are not allowed.");
2260         Value *GroupMask = MaskForGaps;
2261         if (BlockInMask) {
2262           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2263           auto *Undefs = UndefValue::get(BlockInMaskPart->getType());
2264           Value *ShuffledMask = Builder.CreateShuffleVector(
2265               BlockInMaskPart, Undefs,
2266               createReplicatedMask(InterleaveFactor, VF), "interleaved.mask");
2267           GroupMask = MaskForGaps
2268                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2269                                                 MaskForGaps)
2270                           : ShuffledMask;
2271         }
2272         NewLoad =
2273             Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2274                                      GroupMask, UndefVec, "wide.masked.vec");
2275       }
2276       else
2277         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2278                                             Group->getAlign(), "wide.vec");
2279       Group->addMetadata(NewLoad);
2280       NewLoads.push_back(NewLoad);
2281     }
2282 
2283     // For each member in the group, shuffle out the appropriate data from the
2284     // wide loads.
2285     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2286       Instruction *Member = Group->getMember(I);
2287 
2288       // Skip the gaps in the group.
2289       if (!Member)
2290         continue;
2291 
2292       auto StrideMask = createStrideMask(I, InterleaveFactor, VF);
2293       for (unsigned Part = 0; Part < UF; Part++) {
2294         Value *StridedVec = Builder.CreateShuffleVector(
2295             NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2296 
2297         // If this member has different type, cast the result type.
2298         if (Member->getType() != ScalarTy) {
2299           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2300           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2301         }
2302 
2303         if (Group->isReverse())
2304           StridedVec = reverseVector(StridedVec);
2305 
2306         VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
2307       }
2308     }
2309     return;
2310   }
2311 
2312   // The sub vector type for current instruction.
2313   VectorType *SubVT = VectorType::get(ScalarTy, VF);
2314 
2315   // Vectorize the interleaved store group.
2316   for (unsigned Part = 0; Part < UF; Part++) {
2317     // Collect the stored vector from each member.
2318     SmallVector<Value *, 4> StoredVecs;
2319     for (unsigned i = 0; i < InterleaveFactor; i++) {
2320       // Interleaved store group doesn't allow a gap, so each index has a member
2321       Instruction *Member = Group->getMember(i);
2322       assert(Member && "Fail to get a member from an interleaved store group");
2323 
2324       Value *StoredVec = getOrCreateVectorValue(
2325           cast<StoreInst>(Member)->getValueOperand(), Part);
2326       if (Group->isReverse())
2327         StoredVec = reverseVector(StoredVec);
2328 
2329       // If this member has different type, cast it to a unified type.
2330 
2331       if (StoredVec->getType() != SubVT)
2332         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2333 
2334       StoredVecs.push_back(StoredVec);
2335     }
2336 
2337     // Concatenate all vectors into a wide vector.
2338     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2339 
2340     // Interleave the elements in the wide vector.
2341     Value *IVec = Builder.CreateShuffleVector(
2342         WideVec, UndefVec, createInterleaveMask(VF, InterleaveFactor),
2343         "interleaved.vec");
2344 
2345     Instruction *NewStoreInstr;
2346     if (BlockInMask) {
2347       Value *BlockInMaskPart = State.get(BlockInMask, Part);
2348       auto *Undefs = UndefValue::get(BlockInMaskPart->getType());
2349       Value *ShuffledMask = Builder.CreateShuffleVector(
2350           BlockInMaskPart, Undefs, createReplicatedMask(InterleaveFactor, VF),
2351           "interleaved.mask");
2352       NewStoreInstr = Builder.CreateMaskedStore(
2353           IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
2354     }
2355     else
2356       NewStoreInstr =
2357           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2358 
2359     Group->addMetadata(NewStoreInstr);
2360   }
2361 }
2362 
2363 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
2364                                                      VPTransformState &State,
2365                                                      VPValue *Addr,
2366                                                      VPValue *StoredValue,
2367                                                      VPValue *BlockInMask) {
2368   // Attempt to issue a wide load.
2369   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2370   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2371 
2372   assert((LI || SI) && "Invalid Load/Store instruction");
2373   assert((!SI || StoredValue) && "No stored value provided for widened store");
2374   assert((!LI || !StoredValue) && "Stored value provided for widened load");
2375 
2376   LoopVectorizationCostModel::InstWidening Decision =
2377       Cost->getWideningDecision(Instr, VF);
2378   assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2379           Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2380           Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2381          "CM decision is not to widen the memory instruction");
2382 
2383   Type *ScalarDataTy = getMemInstValueType(Instr);
2384   Type *DataTy = VectorType::get(ScalarDataTy, VF);
2385   // An alignment of 0 means target abi alignment. We need to use the scalar's
2386   // target abi alignment in such a case.
2387   const DataLayout &DL = Instr->getModule()->getDataLayout();
2388   const Align Alignment =
2389       DL.getValueOrABITypeAlignment(getLoadStoreAlignment(Instr), ScalarDataTy);
2390 
2391   // Determine if the pointer operand of the access is either consecutive or
2392   // reverse consecutive.
2393   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2394   bool ConsecutiveStride =
2395       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2396   bool CreateGatherScatter =
2397       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2398 
2399   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2400   // gather/scatter. Otherwise Decision should have been to Scalarize.
2401   assert((ConsecutiveStride || CreateGatherScatter) &&
2402          "The instruction should be scalarized");
2403   (void)ConsecutiveStride;
2404 
2405   VectorParts BlockInMaskParts(UF);
2406   bool isMaskRequired = BlockInMask;
2407   if (isMaskRequired)
2408     for (unsigned Part = 0; Part < UF; ++Part)
2409       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2410 
2411   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2412     // Calculate the pointer for the specific unroll-part.
2413     GetElementPtrInst *PartPtr = nullptr;
2414 
2415     bool InBounds = false;
2416     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2417       InBounds = gep->isInBounds();
2418 
2419     if (Reverse) {
2420       // If the address is consecutive but reversed, then the
2421       // wide store needs to start at the last vector element.
2422       PartPtr = cast<GetElementPtrInst>(
2423           Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(-Part * VF)));
2424       PartPtr->setIsInBounds(InBounds);
2425       PartPtr = cast<GetElementPtrInst>(
2426           Builder.CreateGEP(ScalarDataTy, PartPtr, Builder.getInt32(1 - VF)));
2427       PartPtr->setIsInBounds(InBounds);
2428       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2429         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2430     } else {
2431       PartPtr = cast<GetElementPtrInst>(
2432           Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(Part * VF)));
2433       PartPtr->setIsInBounds(InBounds);
2434     }
2435 
2436     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2437     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2438   };
2439 
2440   // Handle Stores:
2441   if (SI) {
2442     setDebugLocFromInst(Builder, SI);
2443 
2444     for (unsigned Part = 0; Part < UF; ++Part) {
2445       Instruction *NewSI = nullptr;
2446       Value *StoredVal = State.get(StoredValue, Part);
2447       if (CreateGatherScatter) {
2448         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2449         Value *VectorGep = State.get(Addr, Part);
2450         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2451                                             MaskPart);
2452       } else {
2453         if (Reverse) {
2454           // If we store to reverse consecutive memory locations, then we need
2455           // to reverse the order of elements in the stored value.
2456           StoredVal = reverseVector(StoredVal);
2457           // We don't want to update the value in the map as it might be used in
2458           // another expression. So don't call resetVectorValue(StoredVal).
2459         }
2460         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
2461         if (isMaskRequired)
2462           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2463                                             BlockInMaskParts[Part]);
2464         else
2465           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2466       }
2467       addMetadata(NewSI, SI);
2468     }
2469     return;
2470   }
2471 
2472   // Handle loads.
2473   assert(LI && "Must have a load instruction");
2474   setDebugLocFromInst(Builder, LI);
2475   for (unsigned Part = 0; Part < UF; ++Part) {
2476     Value *NewLI;
2477     if (CreateGatherScatter) {
2478       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2479       Value *VectorGep = State.get(Addr, Part);
2480       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2481                                          nullptr, "wide.masked.gather");
2482       addMetadata(NewLI, LI);
2483     } else {
2484       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
2485       if (isMaskRequired)
2486         NewLI = Builder.CreateMaskedLoad(
2487             VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy),
2488             "wide.masked.load");
2489       else
2490         NewLI =
2491             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
2492 
2493       // Add metadata to the load, but setVectorValue to the reverse shuffle.
2494       addMetadata(NewLI, LI);
2495       if (Reverse)
2496         NewLI = reverseVector(NewLI);
2497     }
2498     VectorLoopValueMap.setVectorValue(Instr, Part, NewLI);
2499   }
2500 }
2501 
2502 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2503                                                const VPIteration &Instance,
2504                                                bool IfPredicateInstr) {
2505   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2506 
2507   setDebugLocFromInst(Builder, Instr);
2508 
2509   // Does this instruction return a value ?
2510   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2511 
2512   Instruction *Cloned = Instr->clone();
2513   if (!IsVoidRetTy)
2514     Cloned->setName(Instr->getName() + ".cloned");
2515 
2516   // Replace the operands of the cloned instructions with their scalar
2517   // equivalents in the new loop.
2518   for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
2519     auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance);
2520     Cloned->setOperand(op, NewOp);
2521   }
2522   addNewMetadata(Cloned, Instr);
2523 
2524   // Place the cloned scalar in the new loop.
2525   Builder.Insert(Cloned);
2526 
2527   // Add the cloned scalar to the scalar map entry.
2528   VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
2529 
2530   // If we just cloned a new assumption, add it the assumption cache.
2531   if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2532     if (II->getIntrinsicID() == Intrinsic::assume)
2533       AC->registerAssumption(II);
2534 
2535   // End if-block.
2536   if (IfPredicateInstr)
2537     PredicatedInstructions.push_back(Cloned);
2538 }
2539 
2540 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2541                                                       Value *End, Value *Step,
2542                                                       Instruction *DL) {
2543   BasicBlock *Header = L->getHeader();
2544   BasicBlock *Latch = L->getLoopLatch();
2545   // As we're just creating this loop, it's possible no latch exists
2546   // yet. If so, use the header as this will be a single block loop.
2547   if (!Latch)
2548     Latch = Header;
2549 
2550   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2551   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
2552   setDebugLocFromInst(Builder, OldInst);
2553   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2554 
2555   Builder.SetInsertPoint(Latch->getTerminator());
2556   setDebugLocFromInst(Builder, OldInst);
2557 
2558   // Create i+1 and fill the PHINode.
2559   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2560   Induction->addIncoming(Start, L->getLoopPreheader());
2561   Induction->addIncoming(Next, Latch);
2562   // Create the compare.
2563   Value *ICmp = Builder.CreateICmpEQ(Next, End);
2564   Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
2565 
2566   // Now we have two terminators. Remove the old one from the block.
2567   Latch->getTerminator()->eraseFromParent();
2568 
2569   return Induction;
2570 }
2571 
2572 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2573   if (TripCount)
2574     return TripCount;
2575 
2576   assert(L && "Create Trip Count for null loop.");
2577   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2578   // Find the loop boundaries.
2579   ScalarEvolution *SE = PSE.getSE();
2580   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2581   assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
2582          "Invalid loop count");
2583 
2584   Type *IdxTy = Legal->getWidestInductionType();
2585   assert(IdxTy && "No type for induction");
2586 
2587   // The exit count might have the type of i64 while the phi is i32. This can
2588   // happen if we have an induction variable that is sign extended before the
2589   // compare. The only way that we get a backedge taken count is that the
2590   // induction variable was signed and as such will not overflow. In such a case
2591   // truncation is legal.
2592   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
2593       IdxTy->getPrimitiveSizeInBits())
2594     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2595   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2596 
2597   // Get the total trip count from the count by adding 1.
2598   const SCEV *ExitCount = SE->getAddExpr(
2599       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2600 
2601   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2602 
2603   // Expand the trip count and place the new instructions in the preheader.
2604   // Notice that the pre-header does not change, only the loop body.
2605   SCEVExpander Exp(*SE, DL, "induction");
2606 
2607   // Count holds the overall loop count (N).
2608   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2609                                 L->getLoopPreheader()->getTerminator());
2610 
2611   if (TripCount->getType()->isPointerTy())
2612     TripCount =
2613         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2614                                     L->getLoopPreheader()->getTerminator());
2615 
2616   return TripCount;
2617 }
2618 
2619 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
2620   if (VectorTripCount)
2621     return VectorTripCount;
2622 
2623   Value *TC = getOrCreateTripCount(L);
2624   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2625 
2626   Type *Ty = TC->getType();
2627   Constant *Step = ConstantInt::get(Ty, VF * UF);
2628 
2629   // If the tail is to be folded by masking, round the number of iterations N
2630   // up to a multiple of Step instead of rounding down. This is done by first
2631   // adding Step-1 and then rounding down. Note that it's ok if this addition
2632   // overflows: the vector induction variable will eventually wrap to zero given
2633   // that it starts at zero and its Step is a power of two; the loop will then
2634   // exit, with the last early-exit vector comparison also producing all-true.
2635   if (Cost->foldTailByMasking()) {
2636     assert(isPowerOf2_32(VF * UF) &&
2637            "VF*UF must be a power of 2 when folding tail by masking");
2638     TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up");
2639   }
2640 
2641   // Now we need to generate the expression for the part of the loop that the
2642   // vectorized body will execute. This is equal to N - (N % Step) if scalar
2643   // iterations are not required for correctness, or N - Step, otherwise. Step
2644   // is equal to the vectorization factor (number of SIMD elements) times the
2645   // unroll factor (number of SIMD instructions).
2646   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2647 
2648   // If there is a non-reversed interleaved group that may speculatively access
2649   // memory out-of-bounds, we need to ensure that there will be at least one
2650   // iteration of the scalar epilogue loop. Thus, if the step evenly divides
2651   // the trip count, we set the remainder to be equal to the step. If the step
2652   // does not evenly divide the trip count, no adjustment is necessary since
2653   // there will already be scalar iterations. Note that the minimum iterations
2654   // check ensures that N >= Step.
2655   if (VF > 1 && Cost->requiresScalarEpilogue()) {
2656     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2657     R = Builder.CreateSelect(IsZero, Step, R);
2658   }
2659 
2660   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2661 
2662   return VectorTripCount;
2663 }
2664 
2665 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2666                                                    const DataLayout &DL) {
2667   // Verify that V is a vector type with same number of elements as DstVTy.
2668   unsigned VF = DstVTy->getNumElements();
2669   VectorType *SrcVecTy = cast<VectorType>(V->getType());
2670   assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
2671   Type *SrcElemTy = SrcVecTy->getElementType();
2672   Type *DstElemTy = DstVTy->getElementType();
2673   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2674          "Vector elements must have same size");
2675 
2676   // Do a direct cast if element types are castable.
2677   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2678     return Builder.CreateBitOrPointerCast(V, DstVTy);
2679   }
2680   // V cannot be directly casted to desired vector type.
2681   // May happen when V is a floating point vector but DstVTy is a vector of
2682   // pointers or vice-versa. Handle this using a two-step bitcast using an
2683   // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
2684   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2685          "Only one type should be a pointer type");
2686   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2687          "Only one type should be a floating point type");
2688   Type *IntTy =
2689       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2690   VectorType *VecIntTy = VectorType::get(IntTy, VF);
2691   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2692   return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
2693 }
2694 
2695 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2696                                                          BasicBlock *Bypass) {
2697   Value *Count = getOrCreateTripCount(L);
2698   // Reuse existing vector loop preheader for TC checks.
2699   // Note that new preheader block is generated for vector loop.
2700   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
2701   IRBuilder<> Builder(TCCheckBlock->getTerminator());
2702 
2703   // Generate code to check if the loop's trip count is less than VF * UF, or
2704   // equal to it in case a scalar epilogue is required; this implies that the
2705   // vector trip count is zero. This check also covers the case where adding one
2706   // to the backedge-taken count overflowed leading to an incorrect trip count
2707   // of zero. In this case we will also jump to the scalar loop.
2708   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
2709                                           : ICmpInst::ICMP_ULT;
2710 
2711   // If tail is to be folded, vector loop takes care of all iterations.
2712   Value *CheckMinIters = Builder.getFalse();
2713   if (!Cost->foldTailByMasking())
2714     CheckMinIters = Builder.CreateICmp(
2715         P, Count, ConstantInt::get(Count->getType(), VF * UF),
2716         "min.iters.check");
2717 
2718   // Create new preheader for vector loop.
2719   LoopVectorPreHeader =
2720       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
2721                  "vector.ph");
2722 
2723   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
2724                                DT->getNode(Bypass)->getIDom()) &&
2725          "TC check is expected to dominate Bypass");
2726 
2727   // Update dominator for Bypass & LoopExit.
2728   DT->changeImmediateDominator(Bypass, TCCheckBlock);
2729   DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
2730 
2731   ReplaceInstWithInst(
2732       TCCheckBlock->getTerminator(),
2733       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
2734   LoopBypassBlocks.push_back(TCCheckBlock);
2735 }
2736 
2737 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
2738   // Reuse existing vector loop preheader for SCEV checks.
2739   // Note that new preheader block is generated for vector loop.
2740   BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader;
2741 
2742   // Generate the code to check that the SCEV assumptions that we made.
2743   // We want the new basic block to start at the first instruction in a
2744   // sequence of instructions that form a check.
2745   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
2746                    "scev.check");
2747   Value *SCEVCheck = Exp.expandCodeForPredicate(
2748       &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator());
2749 
2750   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
2751     if (C->isZero())
2752       return;
2753 
2754   assert(!SCEVCheckBlock->getParent()->hasOptSize() &&
2755          "Cannot SCEV check stride or overflow when optimizing for size");
2756 
2757   SCEVCheckBlock->setName("vector.scevcheck");
2758   // Create new preheader for vector loop.
2759   LoopVectorPreHeader =
2760       SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI,
2761                  nullptr, "vector.ph");
2762 
2763   // Update dominator only if this is first RT check.
2764   if (LoopBypassBlocks.empty()) {
2765     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
2766     DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
2767   }
2768 
2769   ReplaceInstWithInst(
2770       SCEVCheckBlock->getTerminator(),
2771       BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck));
2772   LoopBypassBlocks.push_back(SCEVCheckBlock);
2773   AddedSafetyChecks = true;
2774 }
2775 
2776 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
2777   // VPlan-native path does not do any analysis for runtime checks currently.
2778   if (EnableVPlanNativePath)
2779     return;
2780 
2781   // Reuse existing vector loop preheader for runtime memory checks.
2782   // Note that new preheader block is generated for vector loop.
2783   BasicBlock *const MemCheckBlock = L->getLoopPreheader();
2784 
2785   // Generate the code that checks in runtime if arrays overlap. We put the
2786   // checks into a separate block to make the more common case of few elements
2787   // faster.
2788   Instruction *FirstCheckInst;
2789   Instruction *MemRuntimeCheck;
2790   std::tie(FirstCheckInst, MemRuntimeCheck) =
2791       Legal->getLAI()->addRuntimeChecks(MemCheckBlock->getTerminator());
2792   if (!MemRuntimeCheck)
2793     return;
2794 
2795   if (MemCheckBlock->getParent()->hasOptSize()) {
2796     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
2797            "Cannot emit memory checks when optimizing for size, unless forced "
2798            "to vectorize.");
2799     ORE->emit([&]() {
2800       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
2801                                         L->getStartLoc(), L->getHeader())
2802              << "Code-size may be reduced by not forcing "
2803                 "vectorization, or by source-code modifications "
2804                 "eliminating the need for runtime checks "
2805                 "(e.g., adding 'restrict').";
2806     });
2807   }
2808 
2809   MemCheckBlock->setName("vector.memcheck");
2810   // Create new preheader for vector loop.
2811   LoopVectorPreHeader =
2812       SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr,
2813                  "vector.ph");
2814 
2815   // Update dominator only if this is first RT check.
2816   if (LoopBypassBlocks.empty()) {
2817     DT->changeImmediateDominator(Bypass, MemCheckBlock);
2818     DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock);
2819   }
2820 
2821   ReplaceInstWithInst(
2822       MemCheckBlock->getTerminator(),
2823       BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheck));
2824   LoopBypassBlocks.push_back(MemCheckBlock);
2825   AddedSafetyChecks = true;
2826 
2827   // We currently don't use LoopVersioning for the actual loop cloning but we
2828   // still use it to add the noalias metadata.
2829   LVer = std::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
2830                                           PSE.getSE());
2831   LVer->prepareNoAliasMetadata();
2832 }
2833 
2834 Value *InnerLoopVectorizer::emitTransformedIndex(
2835     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
2836     const InductionDescriptor &ID) const {
2837 
2838   SCEVExpander Exp(*SE, DL, "induction");
2839   auto Step = ID.getStep();
2840   auto StartValue = ID.getStartValue();
2841   assert(Index->getType() == Step->getType() &&
2842          "Index type does not match StepValue type");
2843 
2844   // Note: the IR at this point is broken. We cannot use SE to create any new
2845   // SCEV and then expand it, hoping that SCEV's simplification will give us
2846   // a more optimal code. Unfortunately, attempt of doing so on invalid IR may
2847   // lead to various SCEV crashes. So all we can do is to use builder and rely
2848   // on InstCombine for future simplifications. Here we handle some trivial
2849   // cases only.
2850   auto CreateAdd = [&B](Value *X, Value *Y) {
2851     assert(X->getType() == Y->getType() && "Types don't match!");
2852     if (auto *CX = dyn_cast<ConstantInt>(X))
2853       if (CX->isZero())
2854         return Y;
2855     if (auto *CY = dyn_cast<ConstantInt>(Y))
2856       if (CY->isZero())
2857         return X;
2858     return B.CreateAdd(X, Y);
2859   };
2860 
2861   auto CreateMul = [&B](Value *X, Value *Y) {
2862     assert(X->getType() == Y->getType() && "Types don't match!");
2863     if (auto *CX = dyn_cast<ConstantInt>(X))
2864       if (CX->isOne())
2865         return Y;
2866     if (auto *CY = dyn_cast<ConstantInt>(Y))
2867       if (CY->isOne())
2868         return X;
2869     return B.CreateMul(X, Y);
2870   };
2871 
2872   switch (ID.getKind()) {
2873   case InductionDescriptor::IK_IntInduction: {
2874     assert(Index->getType() == StartValue->getType() &&
2875            "Index type does not match StartValue type");
2876     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
2877       return B.CreateSub(StartValue, Index);
2878     auto *Offset = CreateMul(
2879         Index, Exp.expandCodeFor(Step, Index->getType(), &*B.GetInsertPoint()));
2880     return CreateAdd(StartValue, Offset);
2881   }
2882   case InductionDescriptor::IK_PtrInduction: {
2883     assert(isa<SCEVConstant>(Step) &&
2884            "Expected constant step for pointer induction");
2885     return B.CreateGEP(
2886         StartValue->getType()->getPointerElementType(), StartValue,
2887         CreateMul(Index, Exp.expandCodeFor(Step, Index->getType(),
2888                                            &*B.GetInsertPoint())));
2889   }
2890   case InductionDescriptor::IK_FpInduction: {
2891     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2892     auto InductionBinOp = ID.getInductionBinOp();
2893     assert(InductionBinOp &&
2894            (InductionBinOp->getOpcode() == Instruction::FAdd ||
2895             InductionBinOp->getOpcode() == Instruction::FSub) &&
2896            "Original bin op should be defined for FP induction");
2897 
2898     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
2899 
2900     // Floating point operations had to be 'fast' to enable the induction.
2901     FastMathFlags Flags;
2902     Flags.setFast();
2903 
2904     Value *MulExp = B.CreateFMul(StepValue, Index);
2905     if (isa<Instruction>(MulExp))
2906       // We have to check, the MulExp may be a constant.
2907       cast<Instruction>(MulExp)->setFastMathFlags(Flags);
2908 
2909     Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2910                                "induction");
2911     if (isa<Instruction>(BOp))
2912       cast<Instruction>(BOp)->setFastMathFlags(Flags);
2913 
2914     return BOp;
2915   }
2916   case InductionDescriptor::IK_NoInduction:
2917     return nullptr;
2918   }
2919   llvm_unreachable("invalid enum");
2920 }
2921 
2922 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
2923   /*
2924    In this function we generate a new loop. The new loop will contain
2925    the vectorized instructions while the old loop will continue to run the
2926    scalar remainder.
2927 
2928        [ ] <-- loop iteration number check.
2929     /   |
2930    /    v
2931   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
2932   |  /  |
2933   | /   v
2934   ||   [ ]     <-- vector pre header.
2935   |/    |
2936   |     v
2937   |    [  ] \
2938   |    [  ]_|   <-- vector loop.
2939   |     |
2940   |     v
2941   |   -[ ]   <--- middle-block.
2942   |  /  |
2943   | /   v
2944   -|- >[ ]     <--- new preheader.
2945    |    |
2946    |    v
2947    |   [ ] \
2948    |   [ ]_|   <-- old scalar loop to handle remainder.
2949     \   |
2950      \  v
2951       >[ ]     <-- exit block.
2952    ...
2953    */
2954 
2955   MDNode *OrigLoopID = OrigLoop->getLoopID();
2956 
2957   // Some loops have a single integer induction variable, while other loops
2958   // don't. One example is c++ iterators that often have multiple pointer
2959   // induction variables. In the code below we also support a case where we
2960   // don't have a single induction variable.
2961   //
2962   // We try to obtain an induction variable from the original loop as hard
2963   // as possible. However if we don't find one that:
2964   //   - is an integer
2965   //   - counts from zero, stepping by one
2966   //   - is the size of the widest induction variable type
2967   // then we create a new one.
2968   OldInduction = Legal->getPrimaryInduction();
2969   Type *IdxTy = Legal->getWidestInductionType();
2970 
2971   // Split the single block loop into the two loop structure described above.
2972   LoopScalarBody = OrigLoop->getHeader();
2973   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
2974   LoopExitBlock = OrigLoop->getExitBlock();
2975   assert(LoopExitBlock && "Must have an exit block");
2976   assert(LoopVectorPreHeader && "Invalid loop structure");
2977 
2978   LoopMiddleBlock =
2979       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
2980                  LI, nullptr, "middle.block");
2981   LoopScalarPreHeader =
2982       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
2983                  nullptr, "scalar.ph");
2984   // We intentionally don't let SplitBlock to update LoopInfo since
2985   // LoopVectorBody should belong to another loop than LoopVectorPreHeader.
2986   // LoopVectorBody is explicitly added to the correct place few lines later.
2987   LoopVectorBody =
2988       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
2989                  nullptr, nullptr, "vector.body");
2990 
2991   // Update dominator for loop exit.
2992   DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
2993 
2994   // Create and register the new vector loop.
2995   Loop *Lp = LI->AllocateLoop();
2996   Loop *ParentLoop = OrigLoop->getParentLoop();
2997 
2998   // Insert the new loop into the loop nest and register the new basic blocks
2999   // before calling any utilities such as SCEV that require valid LoopInfo.
3000   if (ParentLoop) {
3001     ParentLoop->addChildLoop(Lp);
3002   } else {
3003     LI->addTopLevelLoop(Lp);
3004   }
3005   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3006 
3007   // Find the loop boundaries.
3008   Value *Count = getOrCreateTripCount(Lp);
3009 
3010   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3011 
3012   // Now, compare the new count to zero. If it is zero skip the vector loop and
3013   // jump to the scalar loop. This check also covers the case where the
3014   // backedge-taken count is uint##_max: adding one to it will overflow leading
3015   // to an incorrect trip count of zero. In this (rare) case we will also jump
3016   // to the scalar loop.
3017   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3018 
3019   // Generate the code to check any assumptions that we've made for SCEV
3020   // expressions.
3021   emitSCEVChecks(Lp, LoopScalarPreHeader);
3022 
3023   // Generate the code that checks in runtime if arrays overlap. We put the
3024   // checks into a separate block to make the more common case of few elements
3025   // faster.
3026   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3027 
3028   // Generate the induction variable.
3029   // The loop step is equal to the vectorization factor (num of SIMD elements)
3030   // times the unroll factor (num of SIMD instructions).
3031   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3032   Constant *Step = ConstantInt::get(IdxTy, VF * UF);
3033   Induction =
3034       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3035                               getDebugLocFromInstOrOperands(OldInduction));
3036 
3037   // We are going to resume the execution of the scalar loop.
3038   // Go over all of the induction variables that we found and fix the
3039   // PHIs that are left in the scalar version of the loop.
3040   // The starting values of PHI nodes depend on the counter of the last
3041   // iteration in the vectorized loop.
3042   // If we come from a bypass edge then we need to start from the original
3043   // start value.
3044 
3045   // This variable saves the new starting index for the scalar loop. It is used
3046   // to test if there are any tail iterations left once the vector loop has
3047   // completed.
3048   for (auto &InductionEntry : Legal->getInductionVars()) {
3049     PHINode *OrigPhi = InductionEntry.first;
3050     InductionDescriptor II = InductionEntry.second;
3051 
3052     // Create phi nodes to merge from the  backedge-taken check block.
3053     PHINode *BCResumeVal =
3054         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3055                         LoopScalarPreHeader->getTerminator());
3056     // Copy original phi DL over to the new one.
3057     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3058     Value *&EndValue = IVEndValues[OrigPhi];
3059     if (OrigPhi == OldInduction) {
3060       // We know what the end value is.
3061       EndValue = CountRoundDown;
3062     } else {
3063       IRBuilder<> B(Lp->getLoopPreheader()->getTerminator());
3064       Type *StepType = II.getStep()->getType();
3065       Instruction::CastOps CastOp =
3066           CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
3067       Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
3068       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3069       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3070       EndValue->setName("ind.end");
3071     }
3072 
3073     // The new PHI merges the original incoming value, in case of a bypass,
3074     // or the value at the end of the vectorized loop.
3075     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3076 
3077     // Fix the scalar body counter (PHI node).
3078     // The old induction's phi node in the scalar body needs the truncated
3079     // value.
3080     for (BasicBlock *BB : LoopBypassBlocks)
3081       BCResumeVal->addIncoming(II.getStartValue(), BB);
3082     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3083   }
3084 
3085   // We need the OrigLoop (scalar loop part) latch terminator to help
3086   // produce correct debug info for the middle block BB instructions.
3087   // The legality check stage guarantees that the loop will have a single
3088   // latch.
3089   assert(isa<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()) &&
3090          "Scalar loop latch terminator isn't a branch");
3091   BranchInst *ScalarLatchBr =
3092       cast<BranchInst>(OrigLoop->getLoopLatch()->getTerminator());
3093 
3094   // Add a check in the middle block to see if we have completed
3095   // all of the iterations in the first vector loop.
3096   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3097   // If tail is to be folded, we know we don't need to run the remainder.
3098   Value *CmpN = Builder.getTrue();
3099   if (!Cost->foldTailByMasking()) {
3100     CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
3101                            CountRoundDown, "cmp.n",
3102                            LoopMiddleBlock->getTerminator());
3103 
3104     // Here we use the same DebugLoc as the scalar loop latch branch instead
3105     // of the corresponding compare because they may have ended up with
3106     // different line numbers and we want to avoid awkward line stepping while
3107     // debugging. Eg. if the compare has got a line number inside the loop.
3108     cast<Instruction>(CmpN)->setDebugLoc(ScalarLatchBr->getDebugLoc());
3109   }
3110 
3111   BranchInst *BrInst =
3112       BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, CmpN);
3113   BrInst->setDebugLoc(ScalarLatchBr->getDebugLoc());
3114   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3115 
3116   // Get ready to start creating new instructions into the vectorized body.
3117   assert(LoopVectorPreHeader == Lp->getLoopPreheader() &&
3118          "Inconsistent vector loop preheader");
3119   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3120 
3121   Optional<MDNode *> VectorizedLoopID =
3122       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3123                                       LLVMLoopVectorizeFollowupVectorized});
3124   if (VectorizedLoopID.hasValue()) {
3125     Lp->setLoopID(VectorizedLoopID.getValue());
3126 
3127     // Do not setAlreadyVectorized if loop attributes have been defined
3128     // explicitly.
3129     return LoopVectorPreHeader;
3130   }
3131 
3132   // Keep all loop hints from the original loop on the vector loop (we'll
3133   // replace the vectorizer-specific hints below).
3134   if (MDNode *LID = OrigLoop->getLoopID())
3135     Lp->setLoopID(LID);
3136 
3137   LoopVectorizeHints Hints(Lp, true, *ORE);
3138   Hints.setAlreadyVectorized();
3139 
3140 #ifdef EXPENSIVE_CHECKS
3141   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3142   LI->verify(*DT);
3143 #endif
3144 
3145   return LoopVectorPreHeader;
3146 }
3147 
3148 // Fix up external users of the induction variable. At this point, we are
3149 // in LCSSA form, with all external PHIs that use the IV having one input value,
3150 // coming from the remainder loop. We need those PHIs to also have a correct
3151 // value for the IV when arriving directly from the middle block.
3152 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3153                                        const InductionDescriptor &II,
3154                                        Value *CountRoundDown, Value *EndValue,
3155                                        BasicBlock *MiddleBlock) {
3156   // There are two kinds of external IV usages - those that use the value
3157   // computed in the last iteration (the PHI) and those that use the penultimate
3158   // value (the value that feeds into the phi from the loop latch).
3159   // We allow both, but they, obviously, have different values.
3160 
3161   assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3162 
3163   DenseMap<Value *, Value *> MissingVals;
3164 
3165   // An external user of the last iteration's value should see the value that
3166   // the remainder loop uses to initialize its own IV.
3167   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3168   for (User *U : PostInc->users()) {
3169     Instruction *UI = cast<Instruction>(U);
3170     if (!OrigLoop->contains(UI)) {
3171       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3172       MissingVals[UI] = EndValue;
3173     }
3174   }
3175 
3176   // An external user of the penultimate value need to see EndValue - Step.
3177   // The simplest way to get this is to recompute it from the constituent SCEVs,
3178   // that is Start + (Step * (CRD - 1)).
3179   for (User *U : OrigPhi->users()) {
3180     auto *UI = cast<Instruction>(U);
3181     if (!OrigLoop->contains(UI)) {
3182       const DataLayout &DL =
3183           OrigLoop->getHeader()->getModule()->getDataLayout();
3184       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3185 
3186       IRBuilder<> B(MiddleBlock->getTerminator());
3187       Value *CountMinusOne = B.CreateSub(
3188           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3189       Value *CMO =
3190           !II.getStep()->getType()->isIntegerTy()
3191               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3192                              II.getStep()->getType())
3193               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3194       CMO->setName("cast.cmo");
3195       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3196       Escape->setName("ind.escape");
3197       MissingVals[UI] = Escape;
3198     }
3199   }
3200 
3201   for (auto &I : MissingVals) {
3202     PHINode *PHI = cast<PHINode>(I.first);
3203     // One corner case we have to handle is two IVs "chasing" each-other,
3204     // that is %IV2 = phi [...], [ %IV1, %latch ]
3205     // In this case, if IV1 has an external use, we need to avoid adding both
3206     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3207     // don't already have an incoming value for the middle block.
3208     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3209       PHI->addIncoming(I.second, MiddleBlock);
3210   }
3211 }
3212 
3213 namespace {
3214 
3215 struct CSEDenseMapInfo {
3216   static bool canHandle(const Instruction *I) {
3217     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3218            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3219   }
3220 
3221   static inline Instruction *getEmptyKey() {
3222     return DenseMapInfo<Instruction *>::getEmptyKey();
3223   }
3224 
3225   static inline Instruction *getTombstoneKey() {
3226     return DenseMapInfo<Instruction *>::getTombstoneKey();
3227   }
3228 
3229   static unsigned getHashValue(const Instruction *I) {
3230     assert(canHandle(I) && "Unknown instruction!");
3231     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3232                                                            I->value_op_end()));
3233   }
3234 
3235   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3236     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3237         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3238       return LHS == RHS;
3239     return LHS->isIdenticalTo(RHS);
3240   }
3241 };
3242 
3243 } // end anonymous namespace
3244 
3245 ///Perform cse of induction variable instructions.
3246 static void cse(BasicBlock *BB) {
3247   // Perform simple cse.
3248   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3249   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3250     Instruction *In = &*I++;
3251 
3252     if (!CSEDenseMapInfo::canHandle(In))
3253       continue;
3254 
3255     // Check if we can replace this instruction with any of the
3256     // visited instructions.
3257     if (Instruction *V = CSEMap.lookup(In)) {
3258       In->replaceAllUsesWith(V);
3259       In->eraseFromParent();
3260       continue;
3261     }
3262 
3263     CSEMap[In] = In;
3264   }
3265 }
3266 
3267 unsigned LoopVectorizationCostModel::getVectorCallCost(CallInst *CI,
3268                                                        unsigned VF,
3269                                                        bool &NeedToScalarize) {
3270   Function *F = CI->getCalledFunction();
3271   Type *ScalarRetTy = CI->getType();
3272   SmallVector<Type *, 4> Tys, ScalarTys;
3273   for (auto &ArgOp : CI->arg_operands())
3274     ScalarTys.push_back(ArgOp->getType());
3275 
3276   // Estimate cost of scalarized vector call. The source operands are assumed
3277   // to be vectors, so we need to extract individual elements from there,
3278   // execute VF scalar calls, and then gather the result into the vector return
3279   // value.
3280   unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
3281   if (VF == 1)
3282     return ScalarCallCost;
3283 
3284   // Compute corresponding vector type for return value and arguments.
3285   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3286   for (Type *ScalarTy : ScalarTys)
3287     Tys.push_back(ToVectorTy(ScalarTy, VF));
3288 
3289   // Compute costs of unpacking argument values for the scalar calls and
3290   // packing the return values to a vector.
3291   unsigned ScalarizationCost = getScalarizationOverhead(CI, VF);
3292 
3293   unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
3294 
3295   // If we can't emit a vector call for this function, then the currently found
3296   // cost is the cost we need to return.
3297   NeedToScalarize = true;
3298   VFShape Shape = VFShape::get(*CI, {VF, false}, false /*HasGlobalPred*/);
3299   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3300 
3301   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3302     return Cost;
3303 
3304   // If the corresponding vector cost is cheaper, return its cost.
3305   unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
3306   if (VectorCallCost < Cost) {
3307     NeedToScalarize = false;
3308     return VectorCallCost;
3309   }
3310   return Cost;
3311 }
3312 
3313 unsigned LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3314                                                             unsigned VF) {
3315   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3316   assert(ID && "Expected intrinsic call!");
3317 
3318   FastMathFlags FMF;
3319   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3320     FMF = FPMO->getFastMathFlags();
3321 
3322   SmallVector<Value *, 4> Operands(CI->arg_operands());
3323   return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF, CI);
3324 }
3325 
3326 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3327   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3328   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3329   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3330 }
3331 
3332 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3333   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3334   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3335   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3336 }
3337 
3338 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
3339   // For every instruction `I` in MinBWs, truncate the operands, create a
3340   // truncated version of `I` and reextend its result. InstCombine runs
3341   // later and will remove any ext/trunc pairs.
3342   SmallPtrSet<Value *, 4> Erased;
3343   for (const auto &KV : Cost->getMinimalBitwidths()) {
3344     // If the value wasn't vectorized, we must maintain the original scalar
3345     // type. The absence of the value from VectorLoopValueMap indicates that it
3346     // wasn't vectorized.
3347     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3348       continue;
3349     for (unsigned Part = 0; Part < UF; ++Part) {
3350       Value *I = getOrCreateVectorValue(KV.first, Part);
3351       if (Erased.find(I) != Erased.end() || I->use_empty() ||
3352           !isa<Instruction>(I))
3353         continue;
3354       Type *OriginalTy = I->getType();
3355       Type *ScalarTruncatedTy =
3356           IntegerType::get(OriginalTy->getContext(), KV.second);
3357       Type *TruncatedTy = VectorType::get(
3358           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getNumElements());
3359       if (TruncatedTy == OriginalTy)
3360         continue;
3361 
3362       IRBuilder<> B(cast<Instruction>(I));
3363       auto ShrinkOperand = [&](Value *V) -> Value * {
3364         if (auto *ZI = dyn_cast<ZExtInst>(V))
3365           if (ZI->getSrcTy() == TruncatedTy)
3366             return ZI->getOperand(0);
3367         return B.CreateZExtOrTrunc(V, TruncatedTy);
3368       };
3369 
3370       // The actual instruction modification depends on the instruction type,
3371       // unfortunately.
3372       Value *NewI = nullptr;
3373       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3374         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3375                              ShrinkOperand(BO->getOperand(1)));
3376 
3377         // Any wrapping introduced by shrinking this operation shouldn't be
3378         // considered undefined behavior. So, we can't unconditionally copy
3379         // arithmetic wrapping flags to NewI.
3380         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3381       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3382         NewI =
3383             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3384                          ShrinkOperand(CI->getOperand(1)));
3385       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3386         NewI = B.CreateSelect(SI->getCondition(),
3387                               ShrinkOperand(SI->getTrueValue()),
3388                               ShrinkOperand(SI->getFalseValue()));
3389       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3390         switch (CI->getOpcode()) {
3391         default:
3392           llvm_unreachable("Unhandled cast!");
3393         case Instruction::Trunc:
3394           NewI = ShrinkOperand(CI->getOperand(0));
3395           break;
3396         case Instruction::SExt:
3397           NewI = B.CreateSExtOrTrunc(
3398               CI->getOperand(0),
3399               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3400           break;
3401         case Instruction::ZExt:
3402           NewI = B.CreateZExtOrTrunc(
3403               CI->getOperand(0),
3404               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3405           break;
3406         }
3407       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3408         auto Elements0 =
3409             cast<VectorType>(SI->getOperand(0)->getType())->getNumElements();
3410         auto *O0 = B.CreateZExtOrTrunc(
3411             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3412         auto Elements1 =
3413             cast<VectorType>(SI->getOperand(1)->getType())->getNumElements();
3414         auto *O1 = B.CreateZExtOrTrunc(
3415             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3416 
3417         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3418       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3419         // Don't do anything with the operands, just extend the result.
3420         continue;
3421       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3422         auto Elements =
3423             cast<VectorType>(IE->getOperand(0)->getType())->getNumElements();
3424         auto *O0 = B.CreateZExtOrTrunc(
3425             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3426         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3427         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3428       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3429         auto Elements =
3430             cast<VectorType>(EE->getOperand(0)->getType())->getNumElements();
3431         auto *O0 = B.CreateZExtOrTrunc(
3432             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3433         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3434       } else {
3435         // If we don't know what to do, be conservative and don't do anything.
3436         continue;
3437       }
3438 
3439       // Lastly, extend the result.
3440       NewI->takeName(cast<Instruction>(I));
3441       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3442       I->replaceAllUsesWith(Res);
3443       cast<Instruction>(I)->eraseFromParent();
3444       Erased.insert(I);
3445       VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
3446     }
3447   }
3448 
3449   // We'll have created a bunch of ZExts that are now parentless. Clean up.
3450   for (const auto &KV : Cost->getMinimalBitwidths()) {
3451     // If the value wasn't vectorized, we must maintain the original scalar
3452     // type. The absence of the value from VectorLoopValueMap indicates that it
3453     // wasn't vectorized.
3454     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3455       continue;
3456     for (unsigned Part = 0; Part < UF; ++Part) {
3457       Value *I = getOrCreateVectorValue(KV.first, Part);
3458       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3459       if (Inst && Inst->use_empty()) {
3460         Value *NewI = Inst->getOperand(0);
3461         Inst->eraseFromParent();
3462         VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3463       }
3464     }
3465   }
3466 }
3467 
3468 void InnerLoopVectorizer::fixVectorizedLoop() {
3469   // Insert truncates and extends for any truncated instructions as hints to
3470   // InstCombine.
3471   if (VF > 1)
3472     truncateToMinimalBitwidths();
3473 
3474   // Fix widened non-induction PHIs by setting up the PHI operands.
3475   if (OrigPHIsToFix.size()) {
3476     assert(EnableVPlanNativePath &&
3477            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3478     fixNonInductionPHIs();
3479   }
3480 
3481   // At this point every instruction in the original loop is widened to a
3482   // vector form. Now we need to fix the recurrences in the loop. These PHI
3483   // nodes are currently empty because we did not want to introduce cycles.
3484   // This is the second stage of vectorizing recurrences.
3485   fixCrossIterationPHIs();
3486 
3487   // Forget the original basic block.
3488   PSE.getSE()->forgetLoop(OrigLoop);
3489 
3490   // Fix-up external users of the induction variables.
3491   for (auto &Entry : Legal->getInductionVars())
3492     fixupIVUsers(Entry.first, Entry.second,
3493                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3494                  IVEndValues[Entry.first], LoopMiddleBlock);
3495 
3496   fixLCSSAPHIs();
3497   for (Instruction *PI : PredicatedInstructions)
3498     sinkScalarOperands(&*PI);
3499 
3500   // Remove redundant induction instructions.
3501   cse(LoopVectorBody);
3502 
3503   // Set/update profile weights for the vector and remainder loops as original
3504   // loop iterations are now distributed among them. Note that original loop
3505   // represented by LoopScalarBody becomes remainder loop after vectorization.
3506   //
3507   // For cases like foldTailByMasking() and requiresScalarEpiloque() we may
3508   // end up getting slightly roughened result but that should be OK since
3509   // profile is not inherently precise anyway. Note also possible bypass of
3510   // vector code caused by legality checks is ignored, assigning all the weight
3511   // to the vector loop, optimistically.
3512   setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody),
3513                                LI->getLoopFor(LoopVectorBody),
3514                                LI->getLoopFor(LoopScalarBody), VF * UF);
3515 }
3516 
3517 void InnerLoopVectorizer::fixCrossIterationPHIs() {
3518   // In order to support recurrences we need to be able to vectorize Phi nodes.
3519   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3520   // stage #2: We now need to fix the recurrences by adding incoming edges to
3521   // the currently empty PHI nodes. At this point every instruction in the
3522   // original loop is widened to a vector form so we can use them to construct
3523   // the incoming edges.
3524   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
3525     // Handle first-order recurrences and reductions that need to be fixed.
3526     if (Legal->isFirstOrderRecurrence(&Phi))
3527       fixFirstOrderRecurrence(&Phi);
3528     else if (Legal->isReductionVariable(&Phi))
3529       fixReduction(&Phi);
3530   }
3531 }
3532 
3533 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3534   // This is the second phase of vectorizing first-order recurrences. An
3535   // overview of the transformation is described below. Suppose we have the
3536   // following loop.
3537   //
3538   //   for (int i = 0; i < n; ++i)
3539   //     b[i] = a[i] - a[i - 1];
3540   //
3541   // There is a first-order recurrence on "a". For this loop, the shorthand
3542   // scalar IR looks like:
3543   //
3544   //   scalar.ph:
3545   //     s_init = a[-1]
3546   //     br scalar.body
3547   //
3548   //   scalar.body:
3549   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3550   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3551   //     s2 = a[i]
3552   //     b[i] = s2 - s1
3553   //     br cond, scalar.body, ...
3554   //
3555   // In this example, s1 is a recurrence because it's value depends on the
3556   // previous iteration. In the first phase of vectorization, we created a
3557   // temporary value for s1. We now complete the vectorization and produce the
3558   // shorthand vector IR shown below (for VF = 4, UF = 1).
3559   //
3560   //   vector.ph:
3561   //     v_init = vector(..., ..., ..., a[-1])
3562   //     br vector.body
3563   //
3564   //   vector.body
3565   //     i = phi [0, vector.ph], [i+4, vector.body]
3566   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3567   //     v2 = a[i, i+1, i+2, i+3];
3568   //     v3 = vector(v1(3), v2(0, 1, 2))
3569   //     b[i, i+1, i+2, i+3] = v2 - v3
3570   //     br cond, vector.body, middle.block
3571   //
3572   //   middle.block:
3573   //     x = v2(3)
3574   //     br scalar.ph
3575   //
3576   //   scalar.ph:
3577   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3578   //     br scalar.body
3579   //
3580   // After execution completes the vector loop, we extract the next value of
3581   // the recurrence (x) to use as the initial value in the scalar loop.
3582 
3583   // Get the original loop preheader and single loop latch.
3584   auto *Preheader = OrigLoop->getLoopPreheader();
3585   auto *Latch = OrigLoop->getLoopLatch();
3586 
3587   // Get the initial and previous values of the scalar recurrence.
3588   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3589   auto *Previous = Phi->getIncomingValueForBlock(Latch);
3590 
3591   // Create a vector from the initial value.
3592   auto *VectorInit = ScalarInit;
3593   if (VF > 1) {
3594     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3595     VectorInit = Builder.CreateInsertElement(
3596         UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
3597         Builder.getInt32(VF - 1), "vector.recur.init");
3598   }
3599 
3600   // We constructed a temporary phi node in the first phase of vectorization.
3601   // This phi node will eventually be deleted.
3602   Builder.SetInsertPoint(
3603       cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
3604 
3605   // Create a phi node for the new recurrence. The current value will either be
3606   // the initial value inserted into a vector or loop-varying vector value.
3607   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
3608   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
3609 
3610   // Get the vectorized previous value of the last part UF - 1. It appears last
3611   // among all unrolled iterations, due to the order of their construction.
3612   Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
3613 
3614   // Find and set the insertion point after the previous value if it is an
3615   // instruction.
3616   BasicBlock::iterator InsertPt;
3617   // Note that the previous value may have been constant-folded so it is not
3618   // guaranteed to be an instruction in the vector loop.
3619   // FIXME: Loop invariant values do not form recurrences. We should deal with
3620   //        them earlier.
3621   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
3622     InsertPt = LoopVectorBody->getFirstInsertionPt();
3623   else {
3624     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
3625     if (isa<PHINode>(PreviousLastPart))
3626       // If the previous value is a phi node, we should insert after all the phi
3627       // nodes in the block containing the PHI to avoid breaking basic block
3628       // verification. Note that the basic block may be different to
3629       // LoopVectorBody, in case we predicate the loop.
3630       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
3631     else
3632       InsertPt = ++PreviousInst->getIterator();
3633   }
3634   Builder.SetInsertPoint(&*InsertPt);
3635 
3636   // We will construct a vector for the recurrence by combining the values for
3637   // the current and previous iterations. This is the required shuffle mask.
3638   SmallVector<int, 8> ShuffleMask(VF);
3639   ShuffleMask[0] = VF - 1;
3640   for (unsigned I = 1; I < VF; ++I)
3641     ShuffleMask[I] = I + VF - 1;
3642 
3643   // The vector from which to take the initial value for the current iteration
3644   // (actual or unrolled). Initially, this is the vector phi node.
3645   Value *Incoming = VecPhi;
3646 
3647   // Shuffle the current and previous vector and update the vector parts.
3648   for (unsigned Part = 0; Part < UF; ++Part) {
3649     Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
3650     Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
3651     auto *Shuffle = VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart,
3652                                                          ShuffleMask)
3653                            : Incoming;
3654     PhiPart->replaceAllUsesWith(Shuffle);
3655     cast<Instruction>(PhiPart)->eraseFromParent();
3656     VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
3657     Incoming = PreviousPart;
3658   }
3659 
3660   // Fix the latch value of the new recurrence in the vector loop.
3661   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3662 
3663   // Extract the last vector element in the middle block. This will be the
3664   // initial value for the recurrence when jumping to the scalar loop.
3665   auto *ExtractForScalar = Incoming;
3666   if (VF > 1) {
3667     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3668     ExtractForScalar = Builder.CreateExtractElement(
3669         ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
3670   }
3671   // Extract the second last element in the middle block if the
3672   // Phi is used outside the loop. We need to extract the phi itself
3673   // and not the last element (the phi update in the current iteration). This
3674   // will be the value when jumping to the exit block from the LoopMiddleBlock,
3675   // when the scalar loop is not run at all.
3676   Value *ExtractForPhiUsedOutsideLoop = nullptr;
3677   if (VF > 1)
3678     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3679         Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
3680   // When loop is unrolled without vectorizing, initialize
3681   // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value of
3682   // `Incoming`. This is analogous to the vectorized case above: extracting the
3683   // second last element when VF > 1.
3684   else if (UF > 1)
3685     ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
3686 
3687   // Fix the initial value of the original recurrence in the scalar loop.
3688   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3689   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3690   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3691     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3692     Start->addIncoming(Incoming, BB);
3693   }
3694 
3695   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3696   Phi->setName("scalar.recur");
3697 
3698   // Finally, fix users of the recurrence outside the loop. The users will need
3699   // either the last value of the scalar recurrence or the last value of the
3700   // vector recurrence we extracted in the middle block. Since the loop is in
3701   // LCSSA form, we just need to find all the phi nodes for the original scalar
3702   // recurrence in the exit block, and then add an edge for the middle block.
3703   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3704     if (LCSSAPhi.getIncomingValue(0) == Phi) {
3705       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3706     }
3707   }
3708 }
3709 
3710 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
3711   Constant *Zero = Builder.getInt32(0);
3712 
3713   // Get it's reduction variable descriptor.
3714   assert(Legal->isReductionVariable(Phi) &&
3715          "Unable to find the reduction variable");
3716   RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
3717 
3718   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3719   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3720   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3721   RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3722     RdxDesc.getMinMaxRecurrenceKind();
3723   setDebugLocFromInst(Builder, ReductionStartValue);
3724 
3725   // We need to generate a reduction vector from the incoming scalar.
3726   // To do so, we need to generate the 'identity' vector and override
3727   // one of the elements with the incoming scalar reduction. We need
3728   // to do it in the vector-loop preheader.
3729   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3730 
3731   // This is the vector-clone of the value that leaves the loop.
3732   Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
3733 
3734   // Find the reduction identity variable. Zero for addition, or, xor,
3735   // one for multiplication, -1 for And.
3736   Value *Identity;
3737   Value *VectorStart;
3738   if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3739       RK == RecurrenceDescriptor::RK_FloatMinMax) {
3740     // MinMax reduction have the start value as their identify.
3741     if (VF == 1) {
3742       VectorStart = Identity = ReductionStartValue;
3743     } else {
3744       VectorStart = Identity =
3745         Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3746     }
3747   } else {
3748     // Handle other reduction kinds:
3749     Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3750         RK, VecTy->getScalarType());
3751     if (VF == 1) {
3752       Identity = Iden;
3753       // This vector is the Identity vector where the first element is the
3754       // incoming scalar reduction.
3755       VectorStart = ReductionStartValue;
3756     } else {
3757       Identity = ConstantVector::getSplat({VF, false}, Iden);
3758 
3759       // This vector is the Identity vector where the first element is the
3760       // incoming scalar reduction.
3761       VectorStart =
3762         Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3763     }
3764   }
3765 
3766   // Wrap flags are in general invalid after vectorization, clear them.
3767   clearReductionWrapFlags(RdxDesc);
3768 
3769   // Fix the vector-loop phi.
3770 
3771   // Reductions do not have to start at zero. They can start with
3772   // any loop invariant values.
3773   BasicBlock *Latch = OrigLoop->getLoopLatch();
3774   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3775 
3776   for (unsigned Part = 0; Part < UF; ++Part) {
3777     Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
3778     Value *Val = getOrCreateVectorValue(LoopVal, Part);
3779     // Make sure to add the reduction start value only to the
3780     // first unroll part.
3781     Value *StartVal = (Part == 0) ? VectorStart : Identity;
3782     cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
3783     cast<PHINode>(VecRdxPhi)
3784       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3785   }
3786 
3787   // Before each round, move the insertion point right between
3788   // the PHIs and the values we are going to write.
3789   // This allows us to write both PHINodes and the extractelement
3790   // instructions.
3791   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3792 
3793   setDebugLocFromInst(Builder, LoopExitInst);
3794 
3795   // If tail is folded by masking, the vector value to leave the loop should be
3796   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
3797   // instead of the former.
3798   if (Cost->foldTailByMasking()) {
3799     for (unsigned Part = 0; Part < UF; ++Part) {
3800       Value *VecLoopExitInst =
3801           VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3802       Value *Sel = nullptr;
3803       for (User *U : VecLoopExitInst->users()) {
3804         if (isa<SelectInst>(U)) {
3805           assert(!Sel && "Reduction exit feeding two selects");
3806           Sel = U;
3807         } else
3808           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
3809       }
3810       assert(Sel && "Reduction exit feeds no select");
3811       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel);
3812     }
3813   }
3814 
3815   // If the vector reduction can be performed in a smaller type, we truncate
3816   // then extend the loop exit value to enable InstCombine to evaluate the
3817   // entire expression in the smaller type.
3818   if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
3819     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3820     Builder.SetInsertPoint(
3821         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
3822     VectorParts RdxParts(UF);
3823     for (unsigned Part = 0; Part < UF; ++Part) {
3824       RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3825       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3826       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3827                                         : Builder.CreateZExt(Trunc, VecTy);
3828       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
3829            UI != RdxParts[Part]->user_end();)
3830         if (*UI != Trunc) {
3831           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
3832           RdxParts[Part] = Extnd;
3833         } else {
3834           ++UI;
3835         }
3836     }
3837     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3838     for (unsigned Part = 0; Part < UF; ++Part) {
3839       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3840       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
3841     }
3842   }
3843 
3844   // Reduce all of the unrolled parts into a single vector.
3845   Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
3846   unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
3847 
3848   // The middle block terminator has already been assigned a DebugLoc here (the
3849   // OrigLoop's single latch terminator). We want the whole middle block to
3850   // appear to execute on this line because: (a) it is all compiler generated,
3851   // (b) these instructions are always executed after evaluating the latch
3852   // conditional branch, and (c) other passes may add new predecessors which
3853   // terminate on this line. This is the easiest way to ensure we don't
3854   // accidentally cause an extra step back into the loop while debugging.
3855   setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
3856   for (unsigned Part = 1; Part < UF; ++Part) {
3857     Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3858     if (Op != Instruction::ICmp && Op != Instruction::FCmp)
3859       // Floating point operations had to be 'fast' to enable the reduction.
3860       ReducedPartRdx = addFastMathFlag(
3861           Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart,
3862                               ReducedPartRdx, "bin.rdx"),
3863           RdxDesc.getFastMathFlags());
3864     else
3865       ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx,
3866                                       RdxPart);
3867   }
3868 
3869   if (VF > 1) {
3870     bool NoNaN = Legal->hasFunNoNaNAttr();
3871     ReducedPartRdx =
3872         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN);
3873     // If the reduction can be performed in a smaller type, we need to extend
3874     // the reduction to the wider type before we branch to the original loop.
3875     if (Phi->getType() != RdxDesc.getRecurrenceType())
3876       ReducedPartRdx =
3877         RdxDesc.isSigned()
3878         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
3879         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
3880   }
3881 
3882   // Create a phi node that merges control-flow from the backedge-taken check
3883   // block and the middle block.
3884   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
3885                                         LoopScalarPreHeader->getTerminator());
3886   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
3887     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
3888   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
3889 
3890   // Now, we need to fix the users of the reduction variable
3891   // inside and outside of the scalar remainder loop.
3892   // We know that the loop is in LCSSA form. We need to update the
3893   // PHI nodes in the exit blocks.
3894   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3895     // All PHINodes need to have a single entry edge, or two if
3896     // we already fixed them.
3897     assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
3898 
3899     // We found a reduction value exit-PHI. Update it with the
3900     // incoming bypass edge.
3901     if (LCSSAPhi.getIncomingValue(0) == LoopExitInst)
3902       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
3903   } // end of the LCSSA phi scan.
3904 
3905     // Fix the scalar loop reduction variable with the incoming reduction sum
3906     // from the vector body and from the backedge value.
3907   int IncomingEdgeBlockIdx =
3908     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
3909   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
3910   // Pick the other block.
3911   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
3912   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
3913   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
3914 }
3915 
3916 void InnerLoopVectorizer::clearReductionWrapFlags(
3917     RecurrenceDescriptor &RdxDesc) {
3918   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3919   if (RK != RecurrenceDescriptor::RK_IntegerAdd &&
3920       RK != RecurrenceDescriptor::RK_IntegerMult)
3921     return;
3922 
3923   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
3924   assert(LoopExitInstr && "null loop exit instruction");
3925   SmallVector<Instruction *, 8> Worklist;
3926   SmallPtrSet<Instruction *, 8> Visited;
3927   Worklist.push_back(LoopExitInstr);
3928   Visited.insert(LoopExitInstr);
3929 
3930   while (!Worklist.empty()) {
3931     Instruction *Cur = Worklist.pop_back_val();
3932     if (isa<OverflowingBinaryOperator>(Cur))
3933       for (unsigned Part = 0; Part < UF; ++Part) {
3934         Value *V = getOrCreateVectorValue(Cur, Part);
3935         cast<Instruction>(V)->dropPoisonGeneratingFlags();
3936       }
3937 
3938     for (User *U : Cur->users()) {
3939       Instruction *UI = cast<Instruction>(U);
3940       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
3941           Visited.insert(UI).second)
3942         Worklist.push_back(UI);
3943     }
3944   }
3945 }
3946 
3947 void InnerLoopVectorizer::fixLCSSAPHIs() {
3948   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3949     if (LCSSAPhi.getNumIncomingValues() == 1) {
3950       auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
3951       // Non-instruction incoming values will have only one value.
3952       unsigned LastLane = 0;
3953       if (isa<Instruction>(IncomingValue))
3954           LastLane = Cost->isUniformAfterVectorization(
3955                          cast<Instruction>(IncomingValue), VF)
3956                          ? 0
3957                          : VF - 1;
3958       // Can be a loop invariant incoming value or the last scalar value to be
3959       // extracted from the vectorized loop.
3960       Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3961       Value *lastIncomingValue =
3962           getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane });
3963       LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
3964     }
3965   }
3966 }
3967 
3968 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
3969   // The basic block and loop containing the predicated instruction.
3970   auto *PredBB = PredInst->getParent();
3971   auto *VectorLoop = LI->getLoopFor(PredBB);
3972 
3973   // Initialize a worklist with the operands of the predicated instruction.
3974   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
3975 
3976   // Holds instructions that we need to analyze again. An instruction may be
3977   // reanalyzed if we don't yet know if we can sink it or not.
3978   SmallVector<Instruction *, 8> InstsToReanalyze;
3979 
3980   // Returns true if a given use occurs in the predicated block. Phi nodes use
3981   // their operands in their corresponding predecessor blocks.
3982   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
3983     auto *I = cast<Instruction>(U.getUser());
3984     BasicBlock *BB = I->getParent();
3985     if (auto *Phi = dyn_cast<PHINode>(I))
3986       BB = Phi->getIncomingBlock(
3987           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
3988     return BB == PredBB;
3989   };
3990 
3991   // Iteratively sink the scalarized operands of the predicated instruction
3992   // into the block we created for it. When an instruction is sunk, it's
3993   // operands are then added to the worklist. The algorithm ends after one pass
3994   // through the worklist doesn't sink a single instruction.
3995   bool Changed;
3996   do {
3997     // Add the instructions that need to be reanalyzed to the worklist, and
3998     // reset the changed indicator.
3999     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4000     InstsToReanalyze.clear();
4001     Changed = false;
4002 
4003     while (!Worklist.empty()) {
4004       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4005 
4006       // We can't sink an instruction if it is a phi node, is already in the
4007       // predicated block, is not in the loop, or may have side effects.
4008       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4009           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4010         continue;
4011 
4012       // It's legal to sink the instruction if all its uses occur in the
4013       // predicated block. Otherwise, there's nothing to do yet, and we may
4014       // need to reanalyze the instruction.
4015       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4016         InstsToReanalyze.push_back(I);
4017         continue;
4018       }
4019 
4020       // Move the instruction to the beginning of the predicated block, and add
4021       // it's operands to the worklist.
4022       I->moveBefore(&*PredBB->getFirstInsertionPt());
4023       Worklist.insert(I->op_begin(), I->op_end());
4024 
4025       // The sinking may have enabled other instructions to be sunk, so we will
4026       // need to iterate.
4027       Changed = true;
4028     }
4029   } while (Changed);
4030 }
4031 
4032 void InnerLoopVectorizer::fixNonInductionPHIs() {
4033   for (PHINode *OrigPhi : OrigPHIsToFix) {
4034     PHINode *NewPhi =
4035         cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0));
4036     unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();
4037 
4038     SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
4039         predecessors(OrigPhi->getParent()));
4040     SmallVector<BasicBlock *, 2> VectorBBPredecessors(
4041         predecessors(NewPhi->getParent()));
4042     assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
4043            "Scalar and Vector BB should have the same number of predecessors");
4044 
4045     // The insertion point in Builder may be invalidated by the time we get
4046     // here. Force the Builder insertion point to something valid so that we do
4047     // not run into issues during insertion point restore in
4048     // getOrCreateVectorValue calls below.
4049     Builder.SetInsertPoint(NewPhi);
4050 
4051     // The predecessor order is preserved and we can rely on mapping between
4052     // scalar and vector block predecessors.
4053     for (unsigned i = 0; i < NumIncomingValues; ++i) {
4054       BasicBlock *NewPredBB = VectorBBPredecessors[i];
4055 
4056       // When looking up the new scalar/vector values to fix up, use incoming
4057       // values from original phi.
4058       Value *ScIncV =
4059           OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]);
4060 
4061       // Scalar incoming value may need a broadcast
4062       Value *NewIncV = getOrCreateVectorValue(ScIncV, 0);
4063       NewPhi->addIncoming(NewIncV, NewPredBB);
4064     }
4065   }
4066 }
4067 
4068 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, unsigned UF,
4069                                    unsigned VF, bool IsPtrLoopInvariant,
4070                                    SmallBitVector &IsIndexLoopInvariant) {
4071   // Construct a vector GEP by widening the operands of the scalar GEP as
4072   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4073   // results in a vector of pointers when at least one operand of the GEP
4074   // is vector-typed. Thus, to keep the representation compact, we only use
4075   // vector-typed operands for loop-varying values.
4076 
4077   if (VF > 1 && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4078     // If we are vectorizing, but the GEP has only loop-invariant operands,
4079     // the GEP we build (by only using vector-typed operands for
4080     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4081     // produce a vector of pointers, we need to either arbitrarily pick an
4082     // operand to broadcast, or broadcast a clone of the original GEP.
4083     // Here, we broadcast a clone of the original.
4084     //
4085     // TODO: If at some point we decide to scalarize instructions having
4086     //       loop-invariant operands, this special case will no longer be
4087     //       required. We would add the scalarization decision to
4088     //       collectLoopScalars() and teach getVectorValue() to broadcast
4089     //       the lane-zero scalar value.
4090     auto *Clone = Builder.Insert(GEP->clone());
4091     for (unsigned Part = 0; Part < UF; ++Part) {
4092       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4093       VectorLoopValueMap.setVectorValue(GEP, Part, EntryPart);
4094       addMetadata(EntryPart, GEP);
4095     }
4096   } else {
4097     // If the GEP has at least one loop-varying operand, we are sure to
4098     // produce a vector of pointers. But if we are only unrolling, we want
4099     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4100     // produce with the code below will be scalar (if VF == 1) or vector
4101     // (otherwise). Note that for the unroll-only case, we still maintain
4102     // values in the vector mapping with initVector, as we do for other
4103     // instructions.
4104     for (unsigned Part = 0; Part < UF; ++Part) {
4105       // The pointer operand of the new GEP. If it's loop-invariant, we
4106       // won't broadcast it.
4107       auto *Ptr = IsPtrLoopInvariant
4108                       ? GEP->getPointerOperand()
4109                       : getOrCreateVectorValue(GEP->getPointerOperand(), Part);
4110 
4111       // Collect all the indices for the new GEP. If any index is
4112       // loop-invariant, we won't broadcast it.
4113       SmallVector<Value *, 4> Indices;
4114       for (auto Index : enumerate(GEP->indices())) {
4115         Value *User = Index.value().get();
4116         if (IsIndexLoopInvariant[Index.index()])
4117           Indices.push_back(User);
4118         else
4119           Indices.push_back(getOrCreateVectorValue(User, Part));
4120       }
4121 
4122       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4123       // but it should be a vector, otherwise.
4124       auto *NewGEP =
4125           GEP->isInBounds()
4126               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4127                                           Indices)
4128               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4129       assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
4130              "NewGEP is not a pointer vector");
4131       VectorLoopValueMap.setVectorValue(GEP, Part, NewGEP);
4132       addMetadata(NewGEP, GEP);
4133     }
4134   }
4135 }
4136 
4137 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
4138                                               unsigned VF) {
4139   PHINode *P = cast<PHINode>(PN);
4140   if (EnableVPlanNativePath) {
4141     // Currently we enter here in the VPlan-native path for non-induction
4142     // PHIs where all control flow is uniform. We simply widen these PHIs.
4143     // Create a vector phi with no operands - the vector phi operands will be
4144     // set at the end of vector code generation.
4145     Type *VecTy =
4146         (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4147     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4148     VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
4149     OrigPHIsToFix.push_back(P);
4150 
4151     return;
4152   }
4153 
4154   assert(PN->getParent() == OrigLoop->getHeader() &&
4155          "Non-header phis should have been handled elsewhere");
4156 
4157   // In order to support recurrences we need to be able to vectorize Phi nodes.
4158   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4159   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4160   // this value when we vectorize all of the instructions that use the PHI.
4161   if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4162     for (unsigned Part = 0; Part < UF; ++Part) {
4163       // This is phase one of vectorizing PHIs.
4164       Type *VecTy =
4165           (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4166       Value *EntryPart = PHINode::Create(
4167           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4168       VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
4169     }
4170     return;
4171   }
4172 
4173   setDebugLocFromInst(Builder, P);
4174 
4175   // This PHINode must be an induction variable.
4176   // Make sure that we know about it.
4177   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4178 
4179   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4180   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4181 
4182   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4183   // which can be found from the original scalar operations.
4184   switch (II.getKind()) {
4185   case InductionDescriptor::IK_NoInduction:
4186     llvm_unreachable("Unknown induction");
4187   case InductionDescriptor::IK_IntInduction:
4188   case InductionDescriptor::IK_FpInduction:
4189     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4190   case InductionDescriptor::IK_PtrInduction: {
4191     // Handle the pointer induction variable case.
4192     assert(P->getType()->isPointerTy() && "Unexpected type.");
4193     // This is the normalized GEP that starts counting at zero.
4194     Value *PtrInd = Induction;
4195     PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
4196     // Determine the number of scalars we need to generate for each unroll
4197     // iteration. If the instruction is uniform, we only need to generate the
4198     // first lane. Otherwise, we generate all VF values.
4199     unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
4200     // These are the scalar results. Notice that we don't generate vector GEPs
4201     // because scalar GEPs result in better code.
4202     for (unsigned Part = 0; Part < UF; ++Part) {
4203       for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4204         Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
4205         Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4206         Value *SclrGep =
4207             emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4208         SclrGep->setName("next.gep");
4209         VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
4210       }
4211     }
4212     return;
4213   }
4214   }
4215 }
4216 
4217 /// A helper function for checking whether an integer division-related
4218 /// instruction may divide by zero (in which case it must be predicated if
4219 /// executed conditionally in the scalar code).
4220 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4221 /// Non-zero divisors that are non compile-time constants will not be
4222 /// converted into multiplication, so we will still end up scalarizing
4223 /// the division, but can do so w/o predication.
4224 static bool mayDivideByZero(Instruction &I) {
4225   assert((I.getOpcode() == Instruction::UDiv ||
4226           I.getOpcode() == Instruction::SDiv ||
4227           I.getOpcode() == Instruction::URem ||
4228           I.getOpcode() == Instruction::SRem) &&
4229          "Unexpected instruction");
4230   Value *Divisor = I.getOperand(1);
4231   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4232   return !CInt || CInt->isZero();
4233 }
4234 
4235 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPUser &User,
4236                                            VPTransformState &State) {
4237   switch (I.getOpcode()) {
4238   case Instruction::Call:
4239   case Instruction::Br:
4240   case Instruction::PHI:
4241   case Instruction::GetElementPtr:
4242   case Instruction::Select:
4243     llvm_unreachable("This instruction is handled by a different recipe.");
4244   case Instruction::UDiv:
4245   case Instruction::SDiv:
4246   case Instruction::SRem:
4247   case Instruction::URem:
4248   case Instruction::Add:
4249   case Instruction::FAdd:
4250   case Instruction::Sub:
4251   case Instruction::FSub:
4252   case Instruction::FNeg:
4253   case Instruction::Mul:
4254   case Instruction::FMul:
4255   case Instruction::FDiv:
4256   case Instruction::FRem:
4257   case Instruction::Shl:
4258   case Instruction::LShr:
4259   case Instruction::AShr:
4260   case Instruction::And:
4261   case Instruction::Or:
4262   case Instruction::Xor: {
4263     // Just widen unops and binops.
4264     setDebugLocFromInst(Builder, &I);
4265 
4266     for (unsigned Part = 0; Part < UF; ++Part) {
4267       SmallVector<Value *, 2> Ops;
4268       for (VPValue *VPOp : User.operands())
4269         Ops.push_back(State.get(VPOp, Part));
4270 
4271       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4272 
4273       if (auto *VecOp = dyn_cast<Instruction>(V))
4274         VecOp->copyIRFlags(&I);
4275 
4276       // Use this vector value for all users of the original instruction.
4277       VectorLoopValueMap.setVectorValue(&I, Part, V);
4278       addMetadata(V, &I);
4279     }
4280 
4281     break;
4282   }
4283   case Instruction::ICmp:
4284   case Instruction::FCmp: {
4285     // Widen compares. Generate vector compares.
4286     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4287     auto *Cmp = cast<CmpInst>(&I);
4288     setDebugLocFromInst(Builder, Cmp);
4289     for (unsigned Part = 0; Part < UF; ++Part) {
4290       Value *A = State.get(User.getOperand(0), Part);
4291       Value *B = State.get(User.getOperand(1), Part);
4292       Value *C = nullptr;
4293       if (FCmp) {
4294         // Propagate fast math flags.
4295         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4296         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4297         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4298       } else {
4299         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4300       }
4301       VectorLoopValueMap.setVectorValue(&I, Part, C);
4302       addMetadata(C, &I);
4303     }
4304 
4305     break;
4306   }
4307 
4308   case Instruction::ZExt:
4309   case Instruction::SExt:
4310   case Instruction::FPToUI:
4311   case Instruction::FPToSI:
4312   case Instruction::FPExt:
4313   case Instruction::PtrToInt:
4314   case Instruction::IntToPtr:
4315   case Instruction::SIToFP:
4316   case Instruction::UIToFP:
4317   case Instruction::Trunc:
4318   case Instruction::FPTrunc:
4319   case Instruction::BitCast: {
4320     auto *CI = cast<CastInst>(&I);
4321     setDebugLocFromInst(Builder, CI);
4322 
4323     /// Vectorize casts.
4324     Type *DestTy =
4325         (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
4326 
4327     for (unsigned Part = 0; Part < UF; ++Part) {
4328       Value *A = State.get(User.getOperand(0), Part);
4329       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4330       VectorLoopValueMap.setVectorValue(&I, Part, Cast);
4331       addMetadata(Cast, &I);
4332     }
4333     break;
4334   }
4335   default:
4336     // This instruction is not vectorized by simple widening.
4337     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4338     llvm_unreachable("Unhandled instruction!");
4339   } // end of switch.
4340 }
4341 
4342 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPUser &ArgOperands,
4343                                                VPTransformState &State) {
4344   assert(!isa<DbgInfoIntrinsic>(I) &&
4345          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4346   setDebugLocFromInst(Builder, &I);
4347 
4348   Module *M = I.getParent()->getParent()->getParent();
4349   auto *CI = cast<CallInst>(&I);
4350 
4351   SmallVector<Type *, 4> Tys;
4352   for (Value *ArgOperand : CI->arg_operands())
4353     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
4354 
4355   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4356 
4357   // The flag shows whether we use Intrinsic or a usual Call for vectorized
4358   // version of the instruction.
4359   // Is it beneficial to perform intrinsic call compared to lib call?
4360   bool NeedToScalarize = false;
4361   unsigned CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4362   bool UseVectorIntrinsic =
4363       ID && Cost->getVectorIntrinsicCost(CI, VF) <= CallCost;
4364   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4365          "Instruction should be scalarized elsewhere.");
4366 
4367   for (unsigned Part = 0; Part < UF; ++Part) {
4368     SmallVector<Value *, 4> Args;
4369     for (auto &I : enumerate(ArgOperands.operands())) {
4370       // Some intrinsics have a scalar argument - don't replace it with a
4371       // vector.
4372       Value *Arg;
4373       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4374         Arg = State.get(I.value(), Part);
4375       else
4376         Arg = State.get(I.value(), {0, 0});
4377       Args.push_back(Arg);
4378     }
4379 
4380     Function *VectorF;
4381     if (UseVectorIntrinsic) {
4382       // Use vector version of the intrinsic.
4383       Type *TysForDecl[] = {CI->getType()};
4384       if (VF > 1)
4385         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4386       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4387       assert(VectorF && "Can't retrieve vector intrinsic.");
4388     } else {
4389       // Use vector version of the function call.
4390       const VFShape Shape =
4391           VFShape::get(*CI, {VF, false} /*EC*/, false /*HasGlobalPred*/);
4392 #ifndef NDEBUG
4393       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4394              "Can't create vector function.");
4395 #endif
4396         VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
4397     }
4398       SmallVector<OperandBundleDef, 1> OpBundles;
4399       CI->getOperandBundlesAsDefs(OpBundles);
4400       CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4401 
4402       if (isa<FPMathOperator>(V))
4403         V->copyFastMathFlags(CI);
4404 
4405       VectorLoopValueMap.setVectorValue(&I, Part, V);
4406       addMetadata(V, &I);
4407   }
4408 }
4409 
4410 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I,
4411                                                  bool InvariantCond) {
4412   setDebugLocFromInst(Builder, &I);
4413 
4414   // The condition can be loop invariant  but still defined inside the
4415   // loop. This means that we can't just use the original 'cond' value.
4416   // We have to take the 'vectorized' value and pick the first lane.
4417   // Instcombine will make this a no-op.
4418 
4419   auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0});
4420 
4421   for (unsigned Part = 0; Part < UF; ++Part) {
4422     Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part);
4423     Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part);
4424     Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part);
4425     Value *Sel =
4426         Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1);
4427     VectorLoopValueMap.setVectorValue(&I, Part, Sel);
4428     addMetadata(Sel, &I);
4429   }
4430 }
4431 
4432 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
4433   // We should not collect Scalars more than once per VF. Right now, this
4434   // function is called from collectUniformsAndScalars(), which already does
4435   // this check. Collecting Scalars for VF=1 does not make any sense.
4436   assert(VF >= 2 && Scalars.find(VF) == Scalars.end() &&
4437          "This function should not be visited twice for the same VF");
4438 
4439   SmallSetVector<Instruction *, 8> Worklist;
4440 
4441   // These sets are used to seed the analysis with pointers used by memory
4442   // accesses that will remain scalar.
4443   SmallSetVector<Instruction *, 8> ScalarPtrs;
4444   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4445 
4446   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4447   // The pointer operands of loads and stores will be scalar as long as the
4448   // memory access is not a gather or scatter operation. The value operand of a
4449   // store will remain scalar if the store is scalarized.
4450   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4451     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4452     assert(WideningDecision != CM_Unknown &&
4453            "Widening decision should be ready at this moment");
4454     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4455       if (Ptr == Store->getValueOperand())
4456         return WideningDecision == CM_Scalarize;
4457     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4458            "Ptr is neither a value or pointer operand");
4459     return WideningDecision != CM_GatherScatter;
4460   };
4461 
4462   // A helper that returns true if the given value is a bitcast or
4463   // getelementptr instruction contained in the loop.
4464   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4465     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4466             isa<GetElementPtrInst>(V)) &&
4467            !TheLoop->isLoopInvariant(V);
4468   };
4469 
4470   // A helper that evaluates a memory access's use of a pointer. If the use
4471   // will be a scalar use, and the pointer is only used by memory accesses, we
4472   // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4473   // PossibleNonScalarPtrs.
4474   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4475     // We only care about bitcast and getelementptr instructions contained in
4476     // the loop.
4477     if (!isLoopVaryingBitCastOrGEP(Ptr))
4478       return;
4479 
4480     // If the pointer has already been identified as scalar (e.g., if it was
4481     // also identified as uniform), there's nothing to do.
4482     auto *I = cast<Instruction>(Ptr);
4483     if (Worklist.count(I))
4484       return;
4485 
4486     // If the use of the pointer will be a scalar use, and all users of the
4487     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4488     // place the pointer in PossibleNonScalarPtrs.
4489     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4490           return isa<LoadInst>(U) || isa<StoreInst>(U);
4491         }))
4492       ScalarPtrs.insert(I);
4493     else
4494       PossibleNonScalarPtrs.insert(I);
4495   };
4496 
4497   // We seed the scalars analysis with three classes of instructions: (1)
4498   // instructions marked uniform-after-vectorization, (2) bitcast and
4499   // getelementptr instructions used by memory accesses requiring a scalar use,
4500   // and (3) pointer induction variables and their update instructions (we
4501   // currently only scalarize these).
4502   //
4503   // (1) Add to the worklist all instructions that have been identified as
4504   // uniform-after-vectorization.
4505   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4506 
4507   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4508   // memory accesses requiring a scalar use. The pointer operands of loads and
4509   // stores will be scalar as long as the memory accesses is not a gather or
4510   // scatter operation. The value operand of a store will remain scalar if the
4511   // store is scalarized.
4512   for (auto *BB : TheLoop->blocks())
4513     for (auto &I : *BB) {
4514       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4515         evaluatePtrUse(Load, Load->getPointerOperand());
4516       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4517         evaluatePtrUse(Store, Store->getPointerOperand());
4518         evaluatePtrUse(Store, Store->getValueOperand());
4519       }
4520     }
4521   for (auto *I : ScalarPtrs)
4522     if (PossibleNonScalarPtrs.find(I) == PossibleNonScalarPtrs.end()) {
4523       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4524       Worklist.insert(I);
4525     }
4526 
4527   // (3) Add to the worklist all pointer induction variables and their update
4528   // instructions.
4529   //
4530   // TODO: Once we are able to vectorize pointer induction variables we should
4531   //       no longer insert them into the worklist here.
4532   auto *Latch = TheLoop->getLoopLatch();
4533   for (auto &Induction : Legal->getInductionVars()) {
4534     auto *Ind = Induction.first;
4535     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4536     if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction)
4537       continue;
4538     Worklist.insert(Ind);
4539     Worklist.insert(IndUpdate);
4540     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4541     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4542                       << "\n");
4543   }
4544 
4545   // Insert the forced scalars.
4546   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4547   // induction variable when the PHI user is scalarized.
4548   auto ForcedScalar = ForcedScalars.find(VF);
4549   if (ForcedScalar != ForcedScalars.end())
4550     for (auto *I : ForcedScalar->second)
4551       Worklist.insert(I);
4552 
4553   // Expand the worklist by looking through any bitcasts and getelementptr
4554   // instructions we've already identified as scalar. This is similar to the
4555   // expansion step in collectLoopUniforms(); however, here we're only
4556   // expanding to include additional bitcasts and getelementptr instructions.
4557   unsigned Idx = 0;
4558   while (Idx != Worklist.size()) {
4559     Instruction *Dst = Worklist[Idx++];
4560     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4561       continue;
4562     auto *Src = cast<Instruction>(Dst->getOperand(0));
4563     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4564           auto *J = cast<Instruction>(U);
4565           return !TheLoop->contains(J) || Worklist.count(J) ||
4566                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4567                   isScalarUse(J, Src));
4568         })) {
4569       Worklist.insert(Src);
4570       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4571     }
4572   }
4573 
4574   // An induction variable will remain scalar if all users of the induction
4575   // variable and induction variable update remain scalar.
4576   for (auto &Induction : Legal->getInductionVars()) {
4577     auto *Ind = Induction.first;
4578     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4579 
4580     // We already considered pointer induction variables, so there's no reason
4581     // to look at their users again.
4582     //
4583     // TODO: Once we are able to vectorize pointer induction variables we
4584     //       should no longer skip over them here.
4585     if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction)
4586       continue;
4587 
4588     // Determine if all users of the induction variable are scalar after
4589     // vectorization.
4590     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4591       auto *I = cast<Instruction>(U);
4592       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
4593     });
4594     if (!ScalarInd)
4595       continue;
4596 
4597     // Determine if all users of the induction variable update instruction are
4598     // scalar after vectorization.
4599     auto ScalarIndUpdate =
4600         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4601           auto *I = cast<Instruction>(U);
4602           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
4603         });
4604     if (!ScalarIndUpdate)
4605       continue;
4606 
4607     // The induction variable and its update instruction will remain scalar.
4608     Worklist.insert(Ind);
4609     Worklist.insert(IndUpdate);
4610     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4611     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4612                       << "\n");
4613   }
4614 
4615   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4616 }
4617 
4618 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, unsigned VF) {
4619   if (!blockNeedsPredication(I->getParent()))
4620     return false;
4621   switch(I->getOpcode()) {
4622   default:
4623     break;
4624   case Instruction::Load:
4625   case Instruction::Store: {
4626     if (!Legal->isMaskRequired(I))
4627       return false;
4628     auto *Ptr = getLoadStorePointerOperand(I);
4629     auto *Ty = getMemInstValueType(I);
4630     // We have already decided how to vectorize this instruction, get that
4631     // result.
4632     if (VF > 1) {
4633       InstWidening WideningDecision = getWideningDecision(I, VF);
4634       assert(WideningDecision != CM_Unknown &&
4635              "Widening decision should be ready at this moment");
4636       return WideningDecision == CM_Scalarize;
4637     }
4638     const MaybeAlign Alignment = getLoadStoreAlignment(I);
4639     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4640                                 isLegalMaskedGather(Ty, Alignment))
4641                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4642                                 isLegalMaskedScatter(Ty, Alignment));
4643   }
4644   case Instruction::UDiv:
4645   case Instruction::SDiv:
4646   case Instruction::SRem:
4647   case Instruction::URem:
4648     return mayDivideByZero(*I);
4649   }
4650   return false;
4651 }
4652 
4653 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I,
4654                                                                unsigned VF) {
4655   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4656   assert(getWideningDecision(I, VF) == CM_Unknown &&
4657          "Decision should not be set yet.");
4658   auto *Group = getInterleavedAccessGroup(I);
4659   assert(Group && "Must have a group.");
4660 
4661   // If the instruction's allocated size doesn't equal it's type size, it
4662   // requires padding and will be scalarized.
4663   auto &DL = I->getModule()->getDataLayout();
4664   auto *ScalarTy = getMemInstValueType(I);
4665   if (hasIrregularType(ScalarTy, DL, VF))
4666     return false;
4667 
4668   // Check if masking is required.
4669   // A Group may need masking for one of two reasons: it resides in a block that
4670   // needs predication, or it was decided to use masking to deal with gaps.
4671   bool PredicatedAccessRequiresMasking =
4672       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
4673   bool AccessWithGapsRequiresMasking =
4674       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
4675   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
4676     return true;
4677 
4678   // If masked interleaving is required, we expect that the user/target had
4679   // enabled it, because otherwise it either wouldn't have been created or
4680   // it should have been invalidated by the CostModel.
4681   assert(useMaskedInterleavedAccesses(TTI) &&
4682          "Masked interleave-groups for predicated accesses are not enabled.");
4683 
4684   auto *Ty = getMemInstValueType(I);
4685   const MaybeAlign Alignment = getLoadStoreAlignment(I);
4686   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4687                           : TTI.isLegalMaskedStore(Ty, Alignment);
4688 }
4689 
4690 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I,
4691                                                                unsigned VF) {
4692   // Get and ensure we have a valid memory instruction.
4693   LoadInst *LI = dyn_cast<LoadInst>(I);
4694   StoreInst *SI = dyn_cast<StoreInst>(I);
4695   assert((LI || SI) && "Invalid memory instruction");
4696 
4697   auto *Ptr = getLoadStorePointerOperand(I);
4698 
4699   // In order to be widened, the pointer should be consecutive, first of all.
4700   if (!Legal->isConsecutivePtr(Ptr))
4701     return false;
4702 
4703   // If the instruction is a store located in a predicated block, it will be
4704   // scalarized.
4705   if (isScalarWithPredication(I))
4706     return false;
4707 
4708   // If the instruction's allocated size doesn't equal it's type size, it
4709   // requires padding and will be scalarized.
4710   auto &DL = I->getModule()->getDataLayout();
4711   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
4712   if (hasIrregularType(ScalarTy, DL, VF))
4713     return false;
4714 
4715   return true;
4716 }
4717 
4718 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
4719   // We should not collect Uniforms more than once per VF. Right now,
4720   // this function is called from collectUniformsAndScalars(), which
4721   // already does this check. Collecting Uniforms for VF=1 does not make any
4722   // sense.
4723 
4724   assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() &&
4725          "This function should not be visited twice for the same VF");
4726 
4727   // Visit the list of Uniforms. If we'll not find any uniform value, we'll
4728   // not analyze again.  Uniforms.count(VF) will return 1.
4729   Uniforms[VF].clear();
4730 
4731   // We now know that the loop is vectorizable!
4732   // Collect instructions inside the loop that will remain uniform after
4733   // vectorization.
4734 
4735   // Global values, params and instructions outside of current loop are out of
4736   // scope.
4737   auto isOutOfScope = [&](Value *V) -> bool {
4738     Instruction *I = dyn_cast<Instruction>(V);
4739     return (!I || !TheLoop->contains(I));
4740   };
4741 
4742   SetVector<Instruction *> Worklist;
4743   BasicBlock *Latch = TheLoop->getLoopLatch();
4744 
4745   // Instructions that are scalar with predication must not be considered
4746   // uniform after vectorization, because that would create an erroneous
4747   // replicating region where only a single instance out of VF should be formed.
4748   // TODO: optimize such seldom cases if found important, see PR40816.
4749   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
4750     if (isScalarWithPredication(I, VF)) {
4751       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
4752                         << *I << "\n");
4753       return;
4754     }
4755     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
4756     Worklist.insert(I);
4757   };
4758 
4759   // Start with the conditional branch. If the branch condition is an
4760   // instruction contained in the loop that is only used by the branch, it is
4761   // uniform.
4762   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4763   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
4764     addToWorklistIfAllowed(Cmp);
4765 
4766   // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
4767   // are pointers that are treated like consecutive pointers during
4768   // vectorization. The pointer operands of interleaved accesses are an
4769   // example.
4770   SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
4771 
4772   // Holds pointer operands of instructions that are possibly non-uniform.
4773   SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
4774 
4775   auto isUniformDecision = [&](Instruction *I, unsigned VF) {
4776     InstWidening WideningDecision = getWideningDecision(I, VF);
4777     assert(WideningDecision != CM_Unknown &&
4778            "Widening decision should be ready at this moment");
4779 
4780     return (WideningDecision == CM_Widen ||
4781             WideningDecision == CM_Widen_Reverse ||
4782             WideningDecision == CM_Interleave);
4783   };
4784   // Iterate over the instructions in the loop, and collect all
4785   // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
4786   // that a consecutive-like pointer operand will be scalarized, we collect it
4787   // in PossibleNonUniformPtrs instead. We use two sets here because a single
4788   // getelementptr instruction can be used by both vectorized and scalarized
4789   // memory instructions. For example, if a loop loads and stores from the same
4790   // location, but the store is conditional, the store will be scalarized, and
4791   // the getelementptr won't remain uniform.
4792   for (auto *BB : TheLoop->blocks())
4793     for (auto &I : *BB) {
4794       // If there's no pointer operand, there's nothing to do.
4795       auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
4796       if (!Ptr)
4797         continue;
4798 
4799       // True if all users of Ptr are memory accesses that have Ptr as their
4800       // pointer operand.
4801       auto UsersAreMemAccesses =
4802           llvm::all_of(Ptr->users(), [&](User *U) -> bool {
4803             return getLoadStorePointerOperand(U) == Ptr;
4804           });
4805 
4806       // Ensure the memory instruction will not be scalarized or used by
4807       // gather/scatter, making its pointer operand non-uniform. If the pointer
4808       // operand is used by any instruction other than a memory access, we
4809       // conservatively assume the pointer operand may be non-uniform.
4810       if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
4811         PossibleNonUniformPtrs.insert(Ptr);
4812 
4813       // If the memory instruction will be vectorized and its pointer operand
4814       // is consecutive-like, or interleaving - the pointer operand should
4815       // remain uniform.
4816       else
4817         ConsecutiveLikePtrs.insert(Ptr);
4818     }
4819 
4820   // Add to the Worklist all consecutive and consecutive-like pointers that
4821   // aren't also identified as possibly non-uniform.
4822   for (auto *V : ConsecutiveLikePtrs)
4823     if (PossibleNonUniformPtrs.find(V) == PossibleNonUniformPtrs.end())
4824       addToWorklistIfAllowed(V);
4825 
4826   // Expand Worklist in topological order: whenever a new instruction
4827   // is added , its users should be already inside Worklist.  It ensures
4828   // a uniform instruction will only be used by uniform instructions.
4829   unsigned idx = 0;
4830   while (idx != Worklist.size()) {
4831     Instruction *I = Worklist[idx++];
4832 
4833     for (auto OV : I->operand_values()) {
4834       // isOutOfScope operands cannot be uniform instructions.
4835       if (isOutOfScope(OV))
4836         continue;
4837       // First order recurrence Phi's should typically be considered
4838       // non-uniform.
4839       auto *OP = dyn_cast<PHINode>(OV);
4840       if (OP && Legal->isFirstOrderRecurrence(OP))
4841         continue;
4842       // If all the users of the operand are uniform, then add the
4843       // operand into the uniform worklist.
4844       auto *OI = cast<Instruction>(OV);
4845       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
4846             auto *J = cast<Instruction>(U);
4847             return Worklist.count(J) ||
4848                    (OI == getLoadStorePointerOperand(J) &&
4849                     isUniformDecision(J, VF));
4850           }))
4851         addToWorklistIfAllowed(OI);
4852     }
4853   }
4854 
4855   // Returns true if Ptr is the pointer operand of a memory access instruction
4856   // I, and I is known to not require scalarization.
4857   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
4858     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
4859   };
4860 
4861   // For an instruction to be added into Worklist above, all its users inside
4862   // the loop should also be in Worklist. However, this condition cannot be
4863   // true for phi nodes that form a cyclic dependence. We must process phi
4864   // nodes separately. An induction variable will remain uniform if all users
4865   // of the induction variable and induction variable update remain uniform.
4866   // The code below handles both pointer and non-pointer induction variables.
4867   for (auto &Induction : Legal->getInductionVars()) {
4868     auto *Ind = Induction.first;
4869     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4870 
4871     // Determine if all users of the induction variable are uniform after
4872     // vectorization.
4873     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4874       auto *I = cast<Instruction>(U);
4875       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4876              isVectorizedMemAccessUse(I, Ind);
4877     });
4878     if (!UniformInd)
4879       continue;
4880 
4881     // Determine if all users of the induction variable update instruction are
4882     // uniform after vectorization.
4883     auto UniformIndUpdate =
4884         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4885           auto *I = cast<Instruction>(U);
4886           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4887                  isVectorizedMemAccessUse(I, IndUpdate);
4888         });
4889     if (!UniformIndUpdate)
4890       continue;
4891 
4892     // The induction variable and its update instruction will remain uniform.
4893     addToWorklistIfAllowed(Ind);
4894     addToWorklistIfAllowed(IndUpdate);
4895   }
4896 
4897   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
4898 }
4899 
4900 bool LoopVectorizationCostModel::runtimeChecksRequired() {
4901   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
4902 
4903   if (Legal->getRuntimePointerChecking()->Need) {
4904     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
4905         "runtime pointer checks needed. Enable vectorization of this "
4906         "loop with '#pragma clang loop vectorize(enable)' when "
4907         "compiling with -Os/-Oz",
4908         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4909     return true;
4910   }
4911 
4912   if (!PSE.getUnionPredicate().getPredicates().empty()) {
4913     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
4914         "runtime SCEV checks needed. Enable vectorization of this "
4915         "loop with '#pragma clang loop vectorize(enable)' when "
4916         "compiling with -Os/-Oz",
4917         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4918     return true;
4919   }
4920 
4921   // FIXME: Avoid specializing for stride==1 instead of bailing out.
4922   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
4923     reportVectorizationFailure("Runtime stride check is required with -Os/-Oz",
4924         "runtime stride == 1 checks needed. Enable vectorization of "
4925         "this loop with '#pragma clang loop vectorize(enable)' when "
4926         "compiling with -Os/-Oz",
4927         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4928     return true;
4929   }
4930 
4931   return false;
4932 }
4933 
4934 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF() {
4935   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
4936     // TODO: It may by useful to do since it's still likely to be dynamically
4937     // uniform if the target can skip.
4938     reportVectorizationFailure(
4939         "Not inserting runtime ptr check for divergent target",
4940         "runtime pointer checks needed. Not enabled for divergent target",
4941         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
4942     return None;
4943   }
4944 
4945   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
4946   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
4947   if (TC == 1) {
4948     reportVectorizationFailure("Single iteration (non) loop",
4949         "loop trip count is one, irrelevant for vectorization",
4950         "SingleIterationLoop", ORE, TheLoop);
4951     return None;
4952   }
4953 
4954   switch (ScalarEpilogueStatus) {
4955   case CM_ScalarEpilogueAllowed:
4956     return computeFeasibleMaxVF(TC);
4957   case CM_ScalarEpilogueNotNeededUsePredicate:
4958     LLVM_DEBUG(
4959         dbgs() << "LV: vector predicate hint/switch found.\n"
4960                << "LV: Not allowing scalar epilogue, creating predicated "
4961                << "vector loop.\n");
4962     break;
4963   case CM_ScalarEpilogueNotAllowedLowTripLoop:
4964     // fallthrough as a special case of OptForSize
4965   case CM_ScalarEpilogueNotAllowedOptSize:
4966     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
4967       LLVM_DEBUG(
4968           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
4969     else
4970       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
4971                         << "count.\n");
4972 
4973     // Bail if runtime checks are required, which are not good when optimising
4974     // for size.
4975     if (runtimeChecksRequired())
4976       return None;
4977     break;
4978   }
4979 
4980   // Now try the tail folding
4981 
4982   // Invalidate interleave groups that require an epilogue if we can't mask
4983   // the interleave-group.
4984   if (!useMaskedInterleavedAccesses(TTI)) {
4985     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
4986            "No decisions should have been taken at this point");
4987     // Note: There is no need to invalidate any cost modeling decisions here, as
4988     // non where taken so far.
4989     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
4990   }
4991 
4992   unsigned MaxVF = computeFeasibleMaxVF(TC);
4993   if (TC > 0 && TC % MaxVF == 0) {
4994     // Accept MaxVF if we do not have a tail.
4995     LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
4996     return MaxVF;
4997   }
4998 
4999   // If we don't know the precise trip count, or if the trip count that we
5000   // found modulo the vectorization factor is not zero, try to fold the tail
5001   // by masking.
5002   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5003   if (Legal->prepareToFoldTailByMasking()) {
5004     FoldTailByMasking = true;
5005     return MaxVF;
5006   }
5007 
5008   if (TC == 0) {
5009     reportVectorizationFailure(
5010         "Unable to calculate the loop count due to complex control flow",
5011         "unable to calculate the loop count due to complex control flow",
5012         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5013     return None;
5014   }
5015 
5016   reportVectorizationFailure(
5017       "Cannot optimize for size and vectorize at the same time.",
5018       "cannot optimize for size and vectorize at the same time. "
5019       "Enable vectorization of this loop with '#pragma clang loop "
5020       "vectorize(enable)' when compiling with -Os/-Oz",
5021       "NoTailLoopWithOptForSize", ORE, TheLoop);
5022   return None;
5023 }
5024 
5025 unsigned
5026 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount) {
5027   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5028   unsigned SmallestType, WidestType;
5029   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5030   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
5031 
5032   // Get the maximum safe dependence distance in bits computed by LAA.
5033   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
5034   // the memory accesses that is most restrictive (involved in the smallest
5035   // dependence distance).
5036   unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();
5037 
5038   WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);
5039 
5040   unsigned MaxVectorSize = WidestRegister / WidestType;
5041 
5042   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5043                     << " / " << WidestType << " bits.\n");
5044   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5045                     << WidestRegister << " bits.\n");
5046 
5047   assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements"
5048                                  " into one vector!");
5049   if (MaxVectorSize == 0) {
5050     LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
5051     MaxVectorSize = 1;
5052     return MaxVectorSize;
5053   } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
5054              isPowerOf2_32(ConstTripCount)) {
5055     // We need to clamp the VF to be the ConstTripCount. There is no point in
5056     // choosing a higher viable VF as done in the loop below.
5057     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5058                       << ConstTripCount << "\n");
5059     MaxVectorSize = ConstTripCount;
5060     return MaxVectorSize;
5061   }
5062 
5063   unsigned MaxVF = MaxVectorSize;
5064   if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
5065       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5066     // Collect all viable vectorization factors larger than the default MaxVF
5067     // (i.e. MaxVectorSize).
5068     SmallVector<unsigned, 8> VFs;
5069     unsigned NewMaxVectorSize = WidestRegister / SmallestType;
5070     for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
5071       VFs.push_back(VS);
5072 
5073     // For each VF calculate its register usage.
5074     auto RUs = calculateRegisterUsage(VFs);
5075 
5076     // Select the largest VF which doesn't require more registers than existing
5077     // ones.
5078     for (int i = RUs.size() - 1; i >= 0; --i) {
5079       bool Selected = true;
5080       for (auto& pair : RUs[i].MaxLocalUsers) {
5081         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5082         if (pair.second > TargetNumRegisters)
5083           Selected = false;
5084       }
5085       if (Selected) {
5086         MaxVF = VFs[i];
5087         break;
5088       }
5089     }
5090     if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
5091       if (MaxVF < MinVF) {
5092         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5093                           << ") with target's minimum: " << MinVF << '\n');
5094         MaxVF = MinVF;
5095       }
5096     }
5097   }
5098   return MaxVF;
5099 }
5100 
5101 VectorizationFactor
5102 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
5103   float Cost = expectedCost(1).first;
5104   const float ScalarCost = Cost;
5105   unsigned Width = 1;
5106   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
5107 
5108   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5109   if (ForceVectorization && MaxVF > 1) {
5110     // Ignore scalar width, because the user explicitly wants vectorization.
5111     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5112     // evaluation.
5113     Cost = std::numeric_limits<float>::max();
5114   }
5115 
5116   for (unsigned i = 2; i <= MaxVF; i *= 2) {
5117     // Notice that the vector loop needs to be executed less times, so
5118     // we need to divide the cost of the vector loops by the width of
5119     // the vector elements.
5120     VectorizationCostTy C = expectedCost(i);
5121     float VectorCost = C.first / (float)i;
5122     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5123                       << " costs: " << (int)VectorCost << ".\n");
5124     if (!C.second && !ForceVectorization) {
5125       LLVM_DEBUG(
5126           dbgs() << "LV: Not considering vector loop of width " << i
5127                  << " because it will not generate any vector instructions.\n");
5128       continue;
5129     }
5130     if (VectorCost < Cost) {
5131       Cost = VectorCost;
5132       Width = i;
5133     }
5134   }
5135 
5136   if (!EnableCondStoresVectorization && NumPredStores) {
5137     reportVectorizationFailure("There are conditional stores.",
5138         "store that is conditionally executed prevents vectorization",
5139         "ConditionalStore", ORE, TheLoop);
5140     Width = 1;
5141     Cost = ScalarCost;
5142   }
5143 
5144   LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
5145              << "LV: Vectorization seems to be not beneficial, "
5146              << "but was forced by a user.\n");
5147   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5148   VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
5149   return Factor;
5150 }
5151 
5152 std::pair<unsigned, unsigned>
5153 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5154   unsigned MinWidth = -1U;
5155   unsigned MaxWidth = 8;
5156   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5157 
5158   // For each block.
5159   for (BasicBlock *BB : TheLoop->blocks()) {
5160     // For each instruction in the loop.
5161     for (Instruction &I : BB->instructionsWithoutDebug()) {
5162       Type *T = I.getType();
5163 
5164       // Skip ignored values.
5165       if (ValuesToIgnore.find(&I) != ValuesToIgnore.end())
5166         continue;
5167 
5168       // Only examine Loads, Stores and PHINodes.
5169       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5170         continue;
5171 
5172       // Examine PHI nodes that are reduction variables. Update the type to
5173       // account for the recurrence type.
5174       if (auto *PN = dyn_cast<PHINode>(&I)) {
5175         if (!Legal->isReductionVariable(PN))
5176           continue;
5177         RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
5178         T = RdxDesc.getRecurrenceType();
5179       }
5180 
5181       // Examine the stored values.
5182       if (auto *ST = dyn_cast<StoreInst>(&I))
5183         T = ST->getValueOperand()->getType();
5184 
5185       // Ignore loaded pointer types and stored pointer types that are not
5186       // vectorizable.
5187       //
5188       // FIXME: The check here attempts to predict whether a load or store will
5189       //        be vectorized. We only know this for certain after a VF has
5190       //        been selected. Here, we assume that if an access can be
5191       //        vectorized, it will be. We should also look at extending this
5192       //        optimization to non-pointer types.
5193       //
5194       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
5195           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
5196         continue;
5197 
5198       MinWidth = std::min(MinWidth,
5199                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5200       MaxWidth = std::max(MaxWidth,
5201                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5202     }
5203   }
5204 
5205   return {MinWidth, MaxWidth};
5206 }
5207 
5208 unsigned LoopVectorizationCostModel::selectInterleaveCount(unsigned VF,
5209                                                            unsigned LoopCost) {
5210   // -- The interleave heuristics --
5211   // We interleave the loop in order to expose ILP and reduce the loop overhead.
5212   // There are many micro-architectural considerations that we can't predict
5213   // at this level. For example, frontend pressure (on decode or fetch) due to
5214   // code size, or the number and capabilities of the execution ports.
5215   //
5216   // We use the following heuristics to select the interleave count:
5217   // 1. If the code has reductions, then we interleave to break the cross
5218   // iteration dependency.
5219   // 2. If the loop is really small, then we interleave to reduce the loop
5220   // overhead.
5221   // 3. We don't interleave if we think that we will spill registers to memory
5222   // due to the increased register pressure.
5223 
5224   if (!isScalarEpilogueAllowed())
5225     return 1;
5226 
5227   // We used the distance for the interleave count.
5228   if (Legal->getMaxSafeDepDistBytes() != -1U)
5229     return 1;
5230 
5231   // Do not interleave loops with a relatively small known or estimated trip
5232   // count.
5233   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
5234   if (BestKnownTC && *BestKnownTC < TinyTripCountInterleaveThreshold)
5235     return 1;
5236 
5237   RegisterUsage R = calculateRegisterUsage({VF})[0];
5238   // We divide by these constants so assume that we have at least one
5239   // instruction that uses at least one register.
5240   for (auto& pair : R.MaxLocalUsers) {
5241     pair.second = std::max(pair.second, 1U);
5242   }
5243 
5244   // We calculate the interleave count using the following formula.
5245   // Subtract the number of loop invariants from the number of available
5246   // registers. These registers are used by all of the interleaved instances.
5247   // Next, divide the remaining registers by the number of registers that is
5248   // required by the loop, in order to estimate how many parallel instances
5249   // fit without causing spills. All of this is rounded down if necessary to be
5250   // a power of two. We want power of two interleave count to simplify any
5251   // addressing operations or alignment considerations.
5252   // We also want power of two interleave counts to ensure that the induction
5253   // variable of the vector loop wraps to zero, when tail is folded by masking;
5254   // this currently happens when OptForSize, in which case IC is set to 1 above.
5255   unsigned IC = UINT_MAX;
5256 
5257   for (auto& pair : R.MaxLocalUsers) {
5258     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5259     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
5260                       << " registers of "
5261                       << TTI.getRegisterClassName(pair.first) << " register class\n");
5262     if (VF == 1) {
5263       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5264         TargetNumRegisters = ForceTargetNumScalarRegs;
5265     } else {
5266       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5267         TargetNumRegisters = ForceTargetNumVectorRegs;
5268     }
5269     unsigned MaxLocalUsers = pair.second;
5270     unsigned LoopInvariantRegs = 0;
5271     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
5272       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
5273 
5274     unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
5275     // Don't count the induction variable as interleaved.
5276     if (EnableIndVarRegisterHeur) {
5277       TmpIC =
5278           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
5279                         std::max(1U, (MaxLocalUsers - 1)));
5280     }
5281 
5282     IC = std::min(IC, TmpIC);
5283   }
5284 
5285   // Clamp the interleave ranges to reasonable counts.
5286   unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
5287 
5288   // Check if the user has overridden the max.
5289   if (VF == 1) {
5290     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5291       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5292   } else {
5293     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5294       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5295   }
5296 
5297   // If trip count is known or estimated compile time constant, limit the
5298   // interleave count to be less than the trip count divided by VF.
5299   if (BestKnownTC) {
5300     MaxInterleaveCount = std::min(*BestKnownTC / VF, MaxInterleaveCount);
5301   }
5302 
5303   // If we did not calculate the cost for VF (because the user selected the VF)
5304   // then we calculate the cost of VF here.
5305   if (LoopCost == 0)
5306     LoopCost = expectedCost(VF).first;
5307 
5308   assert(LoopCost && "Non-zero loop cost expected");
5309 
5310   // Clamp the calculated IC to be between the 1 and the max interleave count
5311   // that the target and trip count allows.
5312   if (IC > MaxInterleaveCount)
5313     IC = MaxInterleaveCount;
5314   else if (IC < 1)
5315     IC = 1;
5316 
5317   // Interleave if we vectorized this loop and there is a reduction that could
5318   // benefit from interleaving.
5319   if (VF > 1 && !Legal->getReductionVars().empty()) {
5320     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5321     return IC;
5322   }
5323 
5324   // Note that if we've already vectorized the loop we will have done the
5325   // runtime check and so interleaving won't require further checks.
5326   bool InterleavingRequiresRuntimePointerCheck =
5327       (VF == 1 && Legal->getRuntimePointerChecking()->Need);
5328 
5329   // We want to interleave small loops in order to reduce the loop overhead and
5330   // potentially expose ILP opportunities.
5331   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
5332   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
5333     // We assume that the cost overhead is 1 and we use the cost model
5334     // to estimate the cost of the loop and interleave until the cost of the
5335     // loop overhead is about 5% of the cost of the loop.
5336     unsigned SmallIC =
5337         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
5338 
5339     // Interleave until store/load ports (estimated by max interleave count) are
5340     // saturated.
5341     unsigned NumStores = Legal->getNumStores();
5342     unsigned NumLoads = Legal->getNumLoads();
5343     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5344     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5345 
5346     // If we have a scalar reduction (vector reductions are already dealt with
5347     // by this point), we can increase the critical path length if the loop
5348     // we're interleaving is inside another loop. Limit, by default to 2, so the
5349     // critical path only gets increased by one reduction operation.
5350     if (!Legal->getReductionVars().empty() && TheLoop->getLoopDepth() > 1) {
5351       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5352       SmallIC = std::min(SmallIC, F);
5353       StoresIC = std::min(StoresIC, F);
5354       LoadsIC = std::min(LoadsIC, F);
5355     }
5356 
5357     if (EnableLoadStoreRuntimeInterleave &&
5358         std::max(StoresIC, LoadsIC) > SmallIC) {
5359       LLVM_DEBUG(
5360           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5361       return std::max(StoresIC, LoadsIC);
5362     }
5363 
5364     LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5365     return SmallIC;
5366   }
5367 
5368   // Interleave if this is a large loop (small loops are already dealt with by
5369   // this point) that could benefit from interleaving.
5370   bool HasReductions = !Legal->getReductionVars().empty();
5371   if (TTI.enableAggressiveInterleaving(HasReductions)) {
5372     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5373     return IC;
5374   }
5375 
5376   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
5377   return 1;
5378 }
5379 
5380 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5381 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
5382   // This function calculates the register usage by measuring the highest number
5383   // of values that are alive at a single location. Obviously, this is a very
5384   // rough estimation. We scan the loop in a topological order in order and
5385   // assign a number to each instruction. We use RPO to ensure that defs are
5386   // met before their users. We assume that each instruction that has in-loop
5387   // users starts an interval. We record every time that an in-loop value is
5388   // used, so we have a list of the first and last occurrences of each
5389   // instruction. Next, we transpose this data structure into a multi map that
5390   // holds the list of intervals that *end* at a specific location. This multi
5391   // map allows us to perform a linear search. We scan the instructions linearly
5392   // and record each time that a new interval starts, by placing it in a set.
5393   // If we find this value in the multi-map then we remove it from the set.
5394   // The max register usage is the maximum size of the set.
5395   // We also search for instructions that are defined outside the loop, but are
5396   // used inside the loop. We need this number separately from the max-interval
5397   // usage number because when we unroll, loop-invariant values do not take
5398   // more register.
5399   LoopBlocksDFS DFS(TheLoop);
5400   DFS.perform(LI);
5401 
5402   RegisterUsage RU;
5403 
5404   // Each 'key' in the map opens a new interval. The values
5405   // of the map are the index of the 'last seen' usage of the
5406   // instruction that is the key.
5407   using IntervalMap = DenseMap<Instruction *, unsigned>;
5408 
5409   // Maps instruction to its index.
5410   SmallVector<Instruction *, 64> IdxToInstr;
5411   // Marks the end of each interval.
5412   IntervalMap EndPoint;
5413   // Saves the list of instruction indices that are used in the loop.
5414   SmallPtrSet<Instruction *, 8> Ends;
5415   // Saves the list of values that are used in the loop but are
5416   // defined outside the loop, such as arguments and constants.
5417   SmallPtrSet<Value *, 8> LoopInvariants;
5418 
5419   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
5420     for (Instruction &I : BB->instructionsWithoutDebug()) {
5421       IdxToInstr.push_back(&I);
5422 
5423       // Save the end location of each USE.
5424       for (Value *U : I.operands()) {
5425         auto *Instr = dyn_cast<Instruction>(U);
5426 
5427         // Ignore non-instruction values such as arguments, constants, etc.
5428         if (!Instr)
5429           continue;
5430 
5431         // If this instruction is outside the loop then record it and continue.
5432         if (!TheLoop->contains(Instr)) {
5433           LoopInvariants.insert(Instr);
5434           continue;
5435         }
5436 
5437         // Overwrite previous end points.
5438         EndPoint[Instr] = IdxToInstr.size();
5439         Ends.insert(Instr);
5440       }
5441     }
5442   }
5443 
5444   // Saves the list of intervals that end with the index in 'key'.
5445   using InstrList = SmallVector<Instruction *, 2>;
5446   DenseMap<unsigned, InstrList> TransposeEnds;
5447 
5448   // Transpose the EndPoints to a list of values that end at each index.
5449   for (auto &Interval : EndPoint)
5450     TransposeEnds[Interval.second].push_back(Interval.first);
5451 
5452   SmallPtrSet<Instruction *, 8> OpenIntervals;
5453 
5454   // Get the size of the widest register.
5455   unsigned MaxSafeDepDist = -1U;
5456   if (Legal->getMaxSafeDepDistBytes() != -1U)
5457     MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
5458   unsigned WidestRegister =
5459       std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
5460   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5461 
5462   SmallVector<RegisterUsage, 8> RUs(VFs.size());
5463   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
5464 
5465   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
5466 
5467   // A lambda that gets the register usage for the given type and VF.
5468   auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
5469     if (Ty->isTokenTy())
5470       return 0U;
5471     unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
5472     return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
5473   };
5474 
5475   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
5476     Instruction *I = IdxToInstr[i];
5477 
5478     // Remove all of the instructions that end at this location.
5479     InstrList &List = TransposeEnds[i];
5480     for (Instruction *ToRemove : List)
5481       OpenIntervals.erase(ToRemove);
5482 
5483     // Ignore instructions that are never used within the loop.
5484     if (Ends.find(I) == Ends.end())
5485       continue;
5486 
5487     // Skip ignored values.
5488     if (ValuesToIgnore.find(I) != ValuesToIgnore.end())
5489       continue;
5490 
5491     // For each VF find the maximum usage of registers.
5492     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
5493       // Count the number of live intervals.
5494       SmallMapVector<unsigned, unsigned, 4> RegUsage;
5495 
5496       if (VFs[j] == 1) {
5497         for (auto Inst : OpenIntervals) {
5498           unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
5499           if (RegUsage.find(ClassID) == RegUsage.end())
5500             RegUsage[ClassID] = 1;
5501           else
5502             RegUsage[ClassID] += 1;
5503         }
5504       } else {
5505         collectUniformsAndScalars(VFs[j]);
5506         for (auto Inst : OpenIntervals) {
5507           // Skip ignored values for VF > 1.
5508           if (VecValuesToIgnore.find(Inst) != VecValuesToIgnore.end())
5509             continue;
5510           if (isScalarAfterVectorization(Inst, VFs[j])) {
5511             unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
5512             if (RegUsage.find(ClassID) == RegUsage.end())
5513               RegUsage[ClassID] = 1;
5514             else
5515               RegUsage[ClassID] += 1;
5516           } else {
5517             unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
5518             if (RegUsage.find(ClassID) == RegUsage.end())
5519               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
5520             else
5521               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
5522           }
5523         }
5524       }
5525 
5526       for (auto& pair : RegUsage) {
5527         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
5528           MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
5529         else
5530           MaxUsages[j][pair.first] = pair.second;
5531       }
5532     }
5533 
5534     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
5535                       << OpenIntervals.size() << '\n');
5536 
5537     // Add the current instruction to the list of open intervals.
5538     OpenIntervals.insert(I);
5539   }
5540 
5541   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
5542     SmallMapVector<unsigned, unsigned, 4> Invariant;
5543 
5544     for (auto Inst : LoopInvariants) {
5545       unsigned Usage = VFs[i] == 1 ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
5546       unsigned ClassID = TTI.getRegisterClassForType(VFs[i] > 1, Inst->getType());
5547       if (Invariant.find(ClassID) == Invariant.end())
5548         Invariant[ClassID] = Usage;
5549       else
5550         Invariant[ClassID] += Usage;
5551     }
5552 
5553     LLVM_DEBUG({
5554       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
5555       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
5556              << " item\n";
5557       for (const auto &pair : MaxUsages[i]) {
5558         dbgs() << "LV(REG): RegisterClass: "
5559                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
5560                << " registers\n";
5561       }
5562       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
5563              << " item\n";
5564       for (const auto &pair : Invariant) {
5565         dbgs() << "LV(REG): RegisterClass: "
5566                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
5567                << " registers\n";
5568       }
5569     });
5570 
5571     RU.LoopInvariantRegs = Invariant;
5572     RU.MaxLocalUsers = MaxUsages[i];
5573     RUs[i] = RU;
5574   }
5575 
5576   return RUs;
5577 }
5578 
5579 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){
5580   // TODO: Cost model for emulated masked load/store is completely
5581   // broken. This hack guides the cost model to use an artificially
5582   // high enough value to practically disable vectorization with such
5583   // operations, except where previously deployed legality hack allowed
5584   // using very low cost values. This is to avoid regressions coming simply
5585   // from moving "masked load/store" check from legality to cost model.
5586   // Masked Load/Gather emulation was previously never allowed.
5587   // Limited number of Masked Store/Scatter emulation was allowed.
5588   assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
5589   return isa<LoadInst>(I) ||
5590          (isa<StoreInst>(I) &&
5591           NumPredStores > NumberOfStoresToPredicate);
5592 }
5593 
5594 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
5595   // If we aren't vectorizing the loop, or if we've already collected the
5596   // instructions to scalarize, there's nothing to do. Collection may already
5597   // have occurred if we have a user-selected VF and are now computing the
5598   // expected cost for interleaving.
5599   if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end())
5600     return;
5601 
5602   // Initialize a mapping for VF in InstsToScalalarize. If we find that it's
5603   // not profitable to scalarize any instructions, the presence of VF in the
5604   // map will indicate that we've analyzed it already.
5605   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
5606 
5607   // Find all the instructions that are scalar with predication in the loop and
5608   // determine if it would be better to not if-convert the blocks they are in.
5609   // If so, we also record the instructions to scalarize.
5610   for (BasicBlock *BB : TheLoop->blocks()) {
5611     if (!blockNeedsPredication(BB))
5612       continue;
5613     for (Instruction &I : *BB)
5614       if (isScalarWithPredication(&I)) {
5615         ScalarCostsTy ScalarCosts;
5616         // Do not apply discount logic if hacked cost is needed
5617         // for emulated masked memrefs.
5618         if (!useEmulatedMaskMemRefHack(&I) &&
5619             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
5620           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
5621         // Remember that BB will remain after vectorization.
5622         PredicatedBBsAfterVectorization.insert(BB);
5623       }
5624   }
5625 }
5626 
5627 int LoopVectorizationCostModel::computePredInstDiscount(
5628     Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
5629     unsigned VF) {
5630   assert(!isUniformAfterVectorization(PredInst, VF) &&
5631          "Instruction marked uniform-after-vectorization will be predicated");
5632 
5633   // Initialize the discount to zero, meaning that the scalar version and the
5634   // vector version cost the same.
5635   int Discount = 0;
5636 
5637   // Holds instructions to analyze. The instructions we visit are mapped in
5638   // ScalarCosts. Those instructions are the ones that would be scalarized if
5639   // we find that the scalar version costs less.
5640   SmallVector<Instruction *, 8> Worklist;
5641 
5642   // Returns true if the given instruction can be scalarized.
5643   auto canBeScalarized = [&](Instruction *I) -> bool {
5644     // We only attempt to scalarize instructions forming a single-use chain
5645     // from the original predicated block that would otherwise be vectorized.
5646     // Although not strictly necessary, we give up on instructions we know will
5647     // already be scalar to avoid traversing chains that are unlikely to be
5648     // beneficial.
5649     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
5650         isScalarAfterVectorization(I, VF))
5651       return false;
5652 
5653     // If the instruction is scalar with predication, it will be analyzed
5654     // separately. We ignore it within the context of PredInst.
5655     if (isScalarWithPredication(I))
5656       return false;
5657 
5658     // If any of the instruction's operands are uniform after vectorization,
5659     // the instruction cannot be scalarized. This prevents, for example, a
5660     // masked load from being scalarized.
5661     //
5662     // We assume we will only emit a value for lane zero of an instruction
5663     // marked uniform after vectorization, rather than VF identical values.
5664     // Thus, if we scalarize an instruction that uses a uniform, we would
5665     // create uses of values corresponding to the lanes we aren't emitting code
5666     // for. This behavior can be changed by allowing getScalarValue to clone
5667     // the lane zero values for uniforms rather than asserting.
5668     for (Use &U : I->operands())
5669       if (auto *J = dyn_cast<Instruction>(U.get()))
5670         if (isUniformAfterVectorization(J, VF))
5671           return false;
5672 
5673     // Otherwise, we can scalarize the instruction.
5674     return true;
5675   };
5676 
5677   // Compute the expected cost discount from scalarizing the entire expression
5678   // feeding the predicated instruction. We currently only consider expressions
5679   // that are single-use instruction chains.
5680   Worklist.push_back(PredInst);
5681   while (!Worklist.empty()) {
5682     Instruction *I = Worklist.pop_back_val();
5683 
5684     // If we've already analyzed the instruction, there's nothing to do.
5685     if (ScalarCosts.find(I) != ScalarCosts.end())
5686       continue;
5687 
5688     // Compute the cost of the vector instruction. Note that this cost already
5689     // includes the scalarization overhead of the predicated instruction.
5690     unsigned VectorCost = getInstructionCost(I, VF).first;
5691 
5692     // Compute the cost of the scalarized instruction. This cost is the cost of
5693     // the instruction as if it wasn't if-converted and instead remained in the
5694     // predicated block. We will scale this cost by block probability after
5695     // computing the scalarization overhead.
5696     unsigned ScalarCost = VF * getInstructionCost(I, 1).first;
5697 
5698     // Compute the scalarization overhead of needed insertelement instructions
5699     // and phi nodes.
5700     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
5701       ScalarCost +=
5702           TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF),
5703                                        APInt::getAllOnesValue(VF), true, false);
5704       ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI);
5705     }
5706 
5707     // Compute the scalarization overhead of needed extractelement
5708     // instructions. For each of the instruction's operands, if the operand can
5709     // be scalarized, add it to the worklist; otherwise, account for the
5710     // overhead.
5711     for (Use &U : I->operands())
5712       if (auto *J = dyn_cast<Instruction>(U.get())) {
5713         assert(VectorType::isValidElementType(J->getType()) &&
5714                "Instruction has non-scalar type");
5715         if (canBeScalarized(J))
5716           Worklist.push_back(J);
5717         else if (needsExtract(J, VF))
5718           ScalarCost += TTI.getScalarizationOverhead(
5719               ToVectorTy(J->getType(), VF), APInt::getAllOnesValue(VF), false,
5720               true);
5721       }
5722 
5723     // Scale the total scalar cost by block probability.
5724     ScalarCost /= getReciprocalPredBlockProb();
5725 
5726     // Compute the discount. A non-negative discount means the vector version
5727     // of the instruction costs more, and scalarizing would be beneficial.
5728     Discount += VectorCost - ScalarCost;
5729     ScalarCosts[I] = ScalarCost;
5730   }
5731 
5732   return Discount;
5733 }
5734 
5735 LoopVectorizationCostModel::VectorizationCostTy
5736 LoopVectorizationCostModel::expectedCost(unsigned VF) {
5737   VectorizationCostTy Cost;
5738 
5739   // For each block.
5740   for (BasicBlock *BB : TheLoop->blocks()) {
5741     VectorizationCostTy BlockCost;
5742 
5743     // For each instruction in the old loop.
5744     for (Instruction &I : BB->instructionsWithoutDebug()) {
5745       // Skip ignored values.
5746       if (ValuesToIgnore.find(&I) != ValuesToIgnore.end() ||
5747           (VF > 1 && VecValuesToIgnore.find(&I) != VecValuesToIgnore.end()))
5748         continue;
5749 
5750       VectorizationCostTy C = getInstructionCost(&I, VF);
5751 
5752       // Check if we should override the cost.
5753       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
5754         C.first = ForceTargetInstructionCost;
5755 
5756       BlockCost.first += C.first;
5757       BlockCost.second |= C.second;
5758       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
5759                         << " for VF " << VF << " For instruction: " << I
5760                         << '\n');
5761     }
5762 
5763     // If we are vectorizing a predicated block, it will have been
5764     // if-converted. This means that the block's instructions (aside from
5765     // stores and instructions that may divide by zero) will now be
5766     // unconditionally executed. For the scalar case, we may not always execute
5767     // the predicated block. Thus, scale the block's cost by the probability of
5768     // executing it.
5769     if (VF == 1 && blockNeedsPredication(BB))
5770       BlockCost.first /= getReciprocalPredBlockProb();
5771 
5772     Cost.first += BlockCost.first;
5773     Cost.second |= BlockCost.second;
5774   }
5775 
5776   return Cost;
5777 }
5778 
5779 /// Gets Address Access SCEV after verifying that the access pattern
5780 /// is loop invariant except the induction variable dependence.
5781 ///
5782 /// This SCEV can be sent to the Target in order to estimate the address
5783 /// calculation cost.
5784 static const SCEV *getAddressAccessSCEV(
5785               Value *Ptr,
5786               LoopVectorizationLegality *Legal,
5787               PredicatedScalarEvolution &PSE,
5788               const Loop *TheLoop) {
5789 
5790   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
5791   if (!Gep)
5792     return nullptr;
5793 
5794   // We are looking for a gep with all loop invariant indices except for one
5795   // which should be an induction variable.
5796   auto SE = PSE.getSE();
5797   unsigned NumOperands = Gep->getNumOperands();
5798   for (unsigned i = 1; i < NumOperands; ++i) {
5799     Value *Opd = Gep->getOperand(i);
5800     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
5801         !Legal->isInductionVariable(Opd))
5802       return nullptr;
5803   }
5804 
5805   // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV.
5806   return PSE.getSCEV(Ptr);
5807 }
5808 
5809 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
5810   return Legal->hasStride(I->getOperand(0)) ||
5811          Legal->hasStride(I->getOperand(1));
5812 }
5813 
5814 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5815                                                                  unsigned VF) {
5816   assert(VF > 1 && "Scalarization cost of instruction implies vectorization.");
5817   Type *ValTy = getMemInstValueType(I);
5818   auto SE = PSE.getSE();
5819 
5820   unsigned AS = getLoadStoreAddressSpace(I);
5821   Value *Ptr = getLoadStorePointerOperand(I);
5822   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
5823 
5824   // Figure out whether the access is strided and get the stride value
5825   // if it's known in compile time
5826   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
5827 
5828   // Get the cost of the scalar memory instruction and address computation.
5829   unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
5830 
5831   // Don't pass *I here, since it is scalar but will actually be part of a
5832   // vectorized loop where the user of it is a vectorized instruction.
5833   const MaybeAlign Alignment = getLoadStoreAlignment(I);
5834   Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
5835                                    Alignment, AS);
5836 
5837   // Get the overhead of the extractelement and insertelement instructions
5838   // we might create due to scalarization.
5839   Cost += getScalarizationOverhead(I, VF);
5840 
5841   // If we have a predicated store, it may not be executed for each vector
5842   // lane. Scale the cost by the probability of executing the predicated
5843   // block.
5844   if (isPredicatedInst(I)) {
5845     Cost /= getReciprocalPredBlockProb();
5846 
5847     if (useEmulatedMaskMemRefHack(I))
5848       // Artificially setting to a high enough value to practically disable
5849       // vectorization with such operations.
5850       Cost = 3000000;
5851   }
5852 
5853   return Cost;
5854 }
5855 
5856 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5857                                                              unsigned VF) {
5858   Type *ValTy = getMemInstValueType(I);
5859   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5860   Value *Ptr = getLoadStorePointerOperand(I);
5861   unsigned AS = getLoadStoreAddressSpace(I);
5862   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
5863 
5864   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5865          "Stride should be 1 or -1 for consecutive memory access");
5866   const MaybeAlign Alignment = getLoadStoreAlignment(I);
5867   unsigned Cost = 0;
5868   if (Legal->isMaskRequired(I))
5869     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy,
5870                                       Alignment ? Alignment->value() : 0, AS);
5871   else
5872     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I);
5873 
5874   bool Reverse = ConsecutiveStride < 0;
5875   if (Reverse)
5876     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5877   return Cost;
5878 }
5879 
5880 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5881                                                          unsigned VF) {
5882   Type *ValTy = getMemInstValueType(I);
5883   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5884   const MaybeAlign Alignment = getLoadStoreAlignment(I);
5885   unsigned AS = getLoadStoreAddressSpace(I);
5886   if (isa<LoadInst>(I)) {
5887     return TTI.getAddressComputationCost(ValTy) +
5888            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) +
5889            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
5890   }
5891   StoreInst *SI = cast<StoreInst>(I);
5892 
5893   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
5894   return TTI.getAddressComputationCost(ValTy) +
5895          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS) +
5896          (isLoopInvariantStoreValue
5897               ? 0
5898               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
5899                                        VF - 1));
5900 }
5901 
5902 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
5903                                                           unsigned VF) {
5904   Type *ValTy = getMemInstValueType(I);
5905   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5906   const MaybeAlign Alignment = getLoadStoreAlignment(I);
5907   Value *Ptr = getLoadStorePointerOperand(I);
5908 
5909   return TTI.getAddressComputationCost(VectorTy) +
5910          TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
5911                                     Legal->isMaskRequired(I),
5912                                     Alignment ? Alignment->value() : 0, I);
5913 }
5914 
5915 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
5916                                                             unsigned VF) {
5917   Type *ValTy = getMemInstValueType(I);
5918   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
5919   unsigned AS = getLoadStoreAddressSpace(I);
5920 
5921   auto Group = getInterleavedAccessGroup(I);
5922   assert(Group && "Fail to get an interleaved access group.");
5923 
5924   unsigned InterleaveFactor = Group->getFactor();
5925   VectorType *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
5926 
5927   // Holds the indices of existing members in an interleaved load group.
5928   // An interleaved store group doesn't need this as it doesn't allow gaps.
5929   SmallVector<unsigned, 4> Indices;
5930   if (isa<LoadInst>(I)) {
5931     for (unsigned i = 0; i < InterleaveFactor; i++)
5932       if (Group->getMember(i))
5933         Indices.push_back(i);
5934   }
5935 
5936   // Calculate the cost of the whole interleaved group.
5937   bool UseMaskForGaps =
5938       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
5939   unsigned Cost = TTI.getInterleavedMemoryOpCost(
5940       I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
5941       Group->getAlign().value(), AS, Legal->isMaskRequired(I), UseMaskForGaps);
5942 
5943   if (Group->isReverse()) {
5944     // TODO: Add support for reversed masked interleaved access.
5945     assert(!Legal->isMaskRequired(I) &&
5946            "Reverse masked interleaved access not supported.");
5947     Cost += Group->getNumMembers() *
5948             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5949   }
5950   return Cost;
5951 }
5952 
5953 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
5954                                                               unsigned VF) {
5955   // Calculate scalar cost only. Vectorization cost should be ready at this
5956   // moment.
5957   if (VF == 1) {
5958     Type *ValTy = getMemInstValueType(I);
5959     const MaybeAlign Alignment = getLoadStoreAlignment(I);
5960     unsigned AS = getLoadStoreAddressSpace(I);
5961 
5962     return TTI.getAddressComputationCost(ValTy) +
5963            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I);
5964   }
5965   return getWideningCost(I, VF);
5966 }
5967 
5968 LoopVectorizationCostModel::VectorizationCostTy
5969 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
5970   // If we know that this instruction will remain uniform, check the cost of
5971   // the scalar version.
5972   if (isUniformAfterVectorization(I, VF))
5973     VF = 1;
5974 
5975   if (VF > 1 && isProfitableToScalarize(I, VF))
5976     return VectorizationCostTy(InstsToScalarize[VF][I], false);
5977 
5978   // Forced scalars do not have any scalarization overhead.
5979   auto ForcedScalar = ForcedScalars.find(VF);
5980   if (VF > 1 && ForcedScalar != ForcedScalars.end()) {
5981     auto InstSet = ForcedScalar->second;
5982     if (InstSet.find(I) != InstSet.end())
5983       return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false);
5984   }
5985 
5986   Type *VectorTy;
5987   unsigned C = getInstructionCost(I, VF, VectorTy);
5988 
5989   bool TypeNotScalarized =
5990       VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF;
5991   return VectorizationCostTy(C, TypeNotScalarized);
5992 }
5993 
5994 unsigned LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
5995                                                               unsigned VF) {
5996 
5997   if (VF == 1)
5998     return 0;
5999 
6000   unsigned Cost = 0;
6001   Type *RetTy = ToVectorTy(I->getType(), VF);
6002   if (!RetTy->isVoidTy() &&
6003       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
6004     Cost += TTI.getScalarizationOverhead(RetTy, APInt::getAllOnesValue(VF),
6005                                          true, false);
6006 
6007   // Some targets keep addresses scalar.
6008   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
6009     return Cost;
6010 
6011   // Some targets support efficient element stores.
6012   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
6013     return Cost;
6014 
6015   // Collect operands to consider.
6016   CallInst *CI = dyn_cast<CallInst>(I);
6017   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
6018 
6019   // Skip operands that do not require extraction/scalarization and do not incur
6020   // any overhead.
6021   return Cost + TTI.getOperandsScalarizationOverhead(
6022                     filterExtractingOperands(Ops, VF), VF);
6023 }
6024 
6025 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
6026   if (VF == 1)
6027     return;
6028   NumPredStores = 0;
6029   for (BasicBlock *BB : TheLoop->blocks()) {
6030     // For each instruction in the old loop.
6031     for (Instruction &I : *BB) {
6032       Value *Ptr =  getLoadStorePointerOperand(&I);
6033       if (!Ptr)
6034         continue;
6035 
6036       // TODO: We should generate better code and update the cost model for
6037       // predicated uniform stores. Today they are treated as any other
6038       // predicated store (see added test cases in
6039       // invariant-store-vectorization.ll).
6040       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
6041         NumPredStores++;
6042 
6043       if (Legal->isUniform(Ptr) &&
6044           // Conditional loads and stores should be scalarized and predicated.
6045           // isScalarWithPredication cannot be used here since masked
6046           // gather/scatters are not considered scalar with predication.
6047           !Legal->blockNeedsPredication(I.getParent())) {
6048         // TODO: Avoid replicating loads and stores instead of
6049         // relying on instcombine to remove them.
6050         // Load: Scalar load + broadcast
6051         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
6052         unsigned Cost = getUniformMemOpCost(&I, VF);
6053         setWideningDecision(&I, VF, CM_Scalarize, Cost);
6054         continue;
6055       }
6056 
6057       // We assume that widening is the best solution when possible.
6058       if (memoryInstructionCanBeWidened(&I, VF)) {
6059         unsigned Cost = getConsecutiveMemOpCost(&I, VF);
6060         int ConsecutiveStride =
6061                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
6062         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6063                "Expected consecutive stride.");
6064         InstWidening Decision =
6065             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
6066         setWideningDecision(&I, VF, Decision, Cost);
6067         continue;
6068       }
6069 
6070       // Choose between Interleaving, Gather/Scatter or Scalarization.
6071       unsigned InterleaveCost = std::numeric_limits<unsigned>::max();
6072       unsigned NumAccesses = 1;
6073       if (isAccessInterleaved(&I)) {
6074         auto Group = getInterleavedAccessGroup(&I);
6075         assert(Group && "Fail to get an interleaved access group.");
6076 
6077         // Make one decision for the whole group.
6078         if (getWideningDecision(&I, VF) != CM_Unknown)
6079           continue;
6080 
6081         NumAccesses = Group->getNumMembers();
6082         if (interleavedAccessCanBeWidened(&I, VF))
6083           InterleaveCost = getInterleaveGroupCost(&I, VF);
6084       }
6085 
6086       unsigned GatherScatterCost =
6087           isLegalGatherOrScatter(&I)
6088               ? getGatherScatterCost(&I, VF) * NumAccesses
6089               : std::numeric_limits<unsigned>::max();
6090 
6091       unsigned ScalarizationCost =
6092           getMemInstScalarizationCost(&I, VF) * NumAccesses;
6093 
6094       // Choose better solution for the current VF,
6095       // write down this decision and use it during vectorization.
6096       unsigned Cost;
6097       InstWidening Decision;
6098       if (InterleaveCost <= GatherScatterCost &&
6099           InterleaveCost < ScalarizationCost) {
6100         Decision = CM_Interleave;
6101         Cost = InterleaveCost;
6102       } else if (GatherScatterCost < ScalarizationCost) {
6103         Decision = CM_GatherScatter;
6104         Cost = GatherScatterCost;
6105       } else {
6106         Decision = CM_Scalarize;
6107         Cost = ScalarizationCost;
6108       }
6109       // If the instructions belongs to an interleave group, the whole group
6110       // receives the same decision. The whole group receives the cost, but
6111       // the cost will actually be assigned to one instruction.
6112       if (auto Group = getInterleavedAccessGroup(&I))
6113         setWideningDecision(Group, VF, Decision, Cost);
6114       else
6115         setWideningDecision(&I, VF, Decision, Cost);
6116     }
6117   }
6118 
6119   // Make sure that any load of address and any other address computation
6120   // remains scalar unless there is gather/scatter support. This avoids
6121   // inevitable extracts into address registers, and also has the benefit of
6122   // activating LSR more, since that pass can't optimize vectorized
6123   // addresses.
6124   if (TTI.prefersVectorizedAddressing())
6125     return;
6126 
6127   // Start with all scalar pointer uses.
6128   SmallPtrSet<Instruction *, 8> AddrDefs;
6129   for (BasicBlock *BB : TheLoop->blocks())
6130     for (Instruction &I : *BB) {
6131       Instruction *PtrDef =
6132         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
6133       if (PtrDef && TheLoop->contains(PtrDef) &&
6134           getWideningDecision(&I, VF) != CM_GatherScatter)
6135         AddrDefs.insert(PtrDef);
6136     }
6137 
6138   // Add all instructions used to generate the addresses.
6139   SmallVector<Instruction *, 4> Worklist;
6140   for (auto *I : AddrDefs)
6141     Worklist.push_back(I);
6142   while (!Worklist.empty()) {
6143     Instruction *I = Worklist.pop_back_val();
6144     for (auto &Op : I->operands())
6145       if (auto *InstOp = dyn_cast<Instruction>(Op))
6146         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
6147             AddrDefs.insert(InstOp).second)
6148           Worklist.push_back(InstOp);
6149   }
6150 
6151   for (auto *I : AddrDefs) {
6152     if (isa<LoadInst>(I)) {
6153       // Setting the desired widening decision should ideally be handled in
6154       // by cost functions, but since this involves the task of finding out
6155       // if the loaded register is involved in an address computation, it is
6156       // instead changed here when we know this is the case.
6157       InstWidening Decision = getWideningDecision(I, VF);
6158       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
6159         // Scalarize a widened load of address.
6160         setWideningDecision(I, VF, CM_Scalarize,
6161                             (VF * getMemoryInstructionCost(I, 1)));
6162       else if (auto Group = getInterleavedAccessGroup(I)) {
6163         // Scalarize an interleave group of address loads.
6164         for (unsigned I = 0; I < Group->getFactor(); ++I) {
6165           if (Instruction *Member = Group->getMember(I))
6166             setWideningDecision(Member, VF, CM_Scalarize,
6167                                 (VF * getMemoryInstructionCost(Member, 1)));
6168         }
6169       }
6170     } else
6171       // Make sure I gets scalarized and a cost estimate without
6172       // scalarization overhead.
6173       ForcedScalars[VF].insert(I);
6174   }
6175 }
6176 
6177 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6178                                                         unsigned VF,
6179                                                         Type *&VectorTy) {
6180   Type *RetTy = I->getType();
6181   if (canTruncateToMinimalBitwidth(I, VF))
6182     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6183   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
6184   auto SE = PSE.getSE();
6185 
6186   // TODO: We need to estimate the cost of intrinsic calls.
6187   switch (I->getOpcode()) {
6188   case Instruction::GetElementPtr:
6189     // We mark this instruction as zero-cost because the cost of GEPs in
6190     // vectorized code depends on whether the corresponding memory instruction
6191     // is scalarized or not. Therefore, we handle GEPs with the memory
6192     // instruction cost.
6193     return 0;
6194   case Instruction::Br: {
6195     // In cases of scalarized and predicated instructions, there will be VF
6196     // predicated blocks in the vectorized loop. Each branch around these
6197     // blocks requires also an extract of its vector compare i1 element.
6198     bool ScalarPredicatedBB = false;
6199     BranchInst *BI = cast<BranchInst>(I);
6200     if (VF > 1 && BI->isConditional() &&
6201         (PredicatedBBsAfterVectorization.find(BI->getSuccessor(0)) !=
6202              PredicatedBBsAfterVectorization.end() ||
6203          PredicatedBBsAfterVectorization.find(BI->getSuccessor(1)) !=
6204              PredicatedBBsAfterVectorization.end()))
6205       ScalarPredicatedBB = true;
6206 
6207     if (ScalarPredicatedBB) {
6208       // Return cost for branches around scalarized and predicated blocks.
6209       Type *Vec_i1Ty =
6210           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
6211       return (TTI.getScalarizationOverhead(Vec_i1Ty, APInt::getAllOnesValue(VF),
6212                                            false, true) +
6213               (TTI.getCFInstrCost(Instruction::Br) * VF));
6214     } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1)
6215       // The back-edge branch will remain, as will all scalar branches.
6216       return TTI.getCFInstrCost(Instruction::Br);
6217     else
6218       // This branch will be eliminated by if-conversion.
6219       return 0;
6220     // Note: We currently assume zero cost for an unconditional branch inside
6221     // a predicated block since it will become a fall-through, although we
6222     // may decide in the future to call TTI for all branches.
6223   }
6224   case Instruction::PHI: {
6225     auto *Phi = cast<PHINode>(I);
6226 
6227     // First-order recurrences are replaced by vector shuffles inside the loop.
6228     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
6229     if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
6230       return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
6231                                 cast<VectorType>(VectorTy), VF - 1,
6232                                 VectorType::get(RetTy, 1));
6233 
6234     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
6235     // converted into select instructions. We require N - 1 selects per phi
6236     // node, where N is the number of incoming values.
6237     if (VF > 1 && Phi->getParent() != TheLoop->getHeader())
6238       return (Phi->getNumIncomingValues() - 1) *
6239              TTI.getCmpSelInstrCost(
6240                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
6241                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF));
6242 
6243     return TTI.getCFInstrCost(Instruction::PHI);
6244   }
6245   case Instruction::UDiv:
6246   case Instruction::SDiv:
6247   case Instruction::URem:
6248   case Instruction::SRem:
6249     // If we have a predicated instruction, it may not be executed for each
6250     // vector lane. Get the scalarization cost and scale this amount by the
6251     // probability of executing the predicated block. If the instruction is not
6252     // predicated, we fall through to the next case.
6253     if (VF > 1 && isScalarWithPredication(I)) {
6254       unsigned Cost = 0;
6255 
6256       // These instructions have a non-void type, so account for the phi nodes
6257       // that we will create. This cost is likely to be zero. The phi node
6258       // cost, if any, should be scaled by the block probability because it
6259       // models a copy at the end of each predicated block.
6260       Cost += VF * TTI.getCFInstrCost(Instruction::PHI);
6261 
6262       // The cost of the non-predicated instruction.
6263       Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);
6264 
6265       // The cost of insertelement and extractelement instructions needed for
6266       // scalarization.
6267       Cost += getScalarizationOverhead(I, VF);
6268 
6269       // Scale the cost by the probability of executing the predicated blocks.
6270       // This assumes the predicated block for each vector lane is equally
6271       // likely.
6272       return Cost / getReciprocalPredBlockProb();
6273     }
6274     LLVM_FALLTHROUGH;
6275   case Instruction::Add:
6276   case Instruction::FAdd:
6277   case Instruction::Sub:
6278   case Instruction::FSub:
6279   case Instruction::Mul:
6280   case Instruction::FMul:
6281   case Instruction::FDiv:
6282   case Instruction::FRem:
6283   case Instruction::Shl:
6284   case Instruction::LShr:
6285   case Instruction::AShr:
6286   case Instruction::And:
6287   case Instruction::Or:
6288   case Instruction::Xor: {
6289     // Since we will replace the stride by 1 the multiplication should go away.
6290     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
6291       return 0;
6292     // Certain instructions can be cheaper to vectorize if they have a constant
6293     // second vector operand. One example of this are shifts on x86.
6294     Value *Op2 = I->getOperand(1);
6295     TargetTransformInfo::OperandValueProperties Op2VP;
6296     TargetTransformInfo::OperandValueKind Op2VK =
6297         TTI.getOperandInfo(Op2, Op2VP);
6298     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
6299       Op2VK = TargetTransformInfo::OK_UniformValue;
6300 
6301     SmallVector<const Value *, 4> Operands(I->operand_values());
6302     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6303     return N * TTI.getArithmeticInstrCost(
6304                    I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue,
6305                    Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
6306   }
6307   case Instruction::FNeg: {
6308     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6309     return N * TTI.getArithmeticInstrCost(
6310                    I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue,
6311                    TargetTransformInfo::OK_AnyValue,
6312                    TargetTransformInfo::OP_None, TargetTransformInfo::OP_None,
6313                    I->getOperand(0), I);
6314   }
6315   case Instruction::Select: {
6316     SelectInst *SI = cast<SelectInst>(I);
6317     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6318     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6319     Type *CondTy = SI->getCondition()->getType();
6320     if (!ScalarCond)
6321       CondTy = VectorType::get(CondTy, VF);
6322 
6323     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I);
6324   }
6325   case Instruction::ICmp:
6326   case Instruction::FCmp: {
6327     Type *ValTy = I->getOperand(0)->getType();
6328     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6329     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6330       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
6331     VectorTy = ToVectorTy(ValTy, VF);
6332     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I);
6333   }
6334   case Instruction::Store:
6335   case Instruction::Load: {
6336     unsigned Width = VF;
6337     if (Width > 1) {
6338       InstWidening Decision = getWideningDecision(I, Width);
6339       assert(Decision != CM_Unknown &&
6340              "CM decision should be taken at this point");
6341       if (Decision == CM_Scalarize)
6342         Width = 1;
6343     }
6344     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
6345     return getMemoryInstructionCost(I, VF);
6346   }
6347   case Instruction::ZExt:
6348   case Instruction::SExt:
6349   case Instruction::FPToUI:
6350   case Instruction::FPToSI:
6351   case Instruction::FPExt:
6352   case Instruction::PtrToInt:
6353   case Instruction::IntToPtr:
6354   case Instruction::SIToFP:
6355   case Instruction::UIToFP:
6356   case Instruction::Trunc:
6357   case Instruction::FPTrunc:
6358   case Instruction::BitCast: {
6359     // We optimize the truncation of induction variables having constant
6360     // integer steps. The cost of these truncations is the same as the scalar
6361     // operation.
6362     if (isOptimizableIVTruncate(I, VF)) {
6363       auto *Trunc = cast<TruncInst>(I);
6364       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6365                                   Trunc->getSrcTy(), Trunc);
6366     }
6367 
6368     Type *SrcScalarTy = I->getOperand(0)->getType();
6369     Type *SrcVecTy =
6370         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6371     if (canTruncateToMinimalBitwidth(I, VF)) {
6372       // This cast is going to be shrunk. This may remove the cast or it might
6373       // turn it into slightly different cast. For example, if MinBW == 16,
6374       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
6375       //
6376       // Calculate the modified src and dest types.
6377       Type *MinVecTy = VectorTy;
6378       if (I->getOpcode() == Instruction::Trunc) {
6379         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
6380         VectorTy =
6381             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6382       } else if (I->getOpcode() == Instruction::ZExt ||
6383                  I->getOpcode() == Instruction::SExt) {
6384         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
6385         VectorTy =
6386             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6387       }
6388     }
6389 
6390     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6391     return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I);
6392   }
6393   case Instruction::Call: {
6394     bool NeedToScalarize;
6395     CallInst *CI = cast<CallInst>(I);
6396     unsigned CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
6397     if (getVectorIntrinsicIDForCall(CI, TLI))
6398       return std::min(CallCost, getVectorIntrinsicCost(CI, VF));
6399     return CallCost;
6400   }
6401   default:
6402     // The cost of executing VF copies of the scalar instruction. This opcode
6403     // is unknown. Assume that it is the same as 'mul'.
6404     return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) +
6405            getScalarizationOverhead(I, VF);
6406   } // end of switch.
6407 }
6408 
6409 char LoopVectorize::ID = 0;
6410 
6411 static const char lv_name[] = "Loop Vectorization";
6412 
6413 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
6414 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
6415 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
6416 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
6417 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
6418 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
6419 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
6420 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
6421 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
6422 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
6423 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
6424 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
6425 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
6426 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
6427 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
6428 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
6429 
6430 namespace llvm {
6431 
6432 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
6433 
6434 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
6435                               bool VectorizeOnlyWhenForced) {
6436   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
6437 }
6438 
6439 } // end namespace llvm
6440 
6441 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
6442   // Check if the pointer operand of a load or store instruction is
6443   // consecutive.
6444   if (auto *Ptr = getLoadStorePointerOperand(Inst))
6445     return Legal->isConsecutivePtr(Ptr);
6446   return false;
6447 }
6448 
6449 void LoopVectorizationCostModel::collectValuesToIgnore() {
6450   // Ignore ephemeral values.
6451   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
6452 
6453   // Ignore type-promoting instructions we identified during reduction
6454   // detection.
6455   for (auto &Reduction : Legal->getReductionVars()) {
6456     RecurrenceDescriptor &RedDes = Reduction.second;
6457     SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6458     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6459   }
6460   // Ignore type-casting instructions we identified during induction
6461   // detection.
6462   for (auto &Induction : Legal->getInductionVars()) {
6463     InductionDescriptor &IndDes = Induction.second;
6464     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6465     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6466   }
6467 }
6468 
6469 // TODO: we could return a pair of values that specify the max VF and
6470 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
6471 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment
6472 // doesn't have a cost model that can choose which plan to execute if
6473 // more than one is generated.
6474 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
6475                                  LoopVectorizationCostModel &CM) {
6476   unsigned WidestType;
6477   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
6478   return WidestVectorRegBits / WidestType;
6479 }
6480 
6481 VectorizationFactor
6482 LoopVectorizationPlanner::planInVPlanNativePath(unsigned UserVF) {
6483   unsigned VF = UserVF;
6484   // Outer loop handling: They may require CFG and instruction level
6485   // transformations before even evaluating whether vectorization is profitable.
6486   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6487   // the vectorization pipeline.
6488   if (!OrigLoop->empty()) {
6489     // If the user doesn't provide a vectorization factor, determine a
6490     // reasonable one.
6491     if (!UserVF) {
6492       VF = determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM);
6493       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
6494 
6495       // Make sure we have a VF > 1 for stress testing.
6496       if (VPlanBuildStressTest && VF < 2) {
6497         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
6498                           << "overriding computed VF.\n");
6499         VF = 4;
6500       }
6501     }
6502     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6503     assert(isPowerOf2_32(VF) && "VF needs to be a power of two");
6504     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVF ? "user " : "") << "VF " << VF
6505                       << " to build VPlans.\n");
6506     buildVPlans(VF, VF);
6507 
6508     // For VPlan build stress testing, we bail out after VPlan construction.
6509     if (VPlanBuildStressTest)
6510       return VectorizationFactor::Disabled();
6511 
6512     return {VF, 0};
6513   }
6514 
6515   LLVM_DEBUG(
6516       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6517                 "VPlan-native path.\n");
6518   return VectorizationFactor::Disabled();
6519 }
6520 
6521 Optional<VectorizationFactor> LoopVectorizationPlanner::plan(unsigned UserVF) {
6522   assert(OrigLoop->empty() && "Inner loop expected.");
6523   Optional<unsigned> MaybeMaxVF = CM.computeMaxVF();
6524   if (!MaybeMaxVF) // Cases that should not to be vectorized nor interleaved.
6525     return None;
6526 
6527   // Invalidate interleave groups if all blocks of loop will be predicated.
6528   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
6529       !useMaskedInterleavedAccesses(*TTI)) {
6530     LLVM_DEBUG(
6531         dbgs()
6532         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6533            "which requires masked-interleaved support.\n");
6534     if (CM.InterleaveInfo.invalidateGroups())
6535       // Invalidating interleave groups also requires invalidating all decisions
6536       // based on them, which includes widening decisions and uniform and scalar
6537       // values.
6538       CM.invalidateCostModelingDecisions();
6539   }
6540 
6541   if (UserVF) {
6542     LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6543     assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
6544     // Collect the instructions (and their associated costs) that will be more
6545     // profitable to scalarize.
6546     CM.selectUserVectorizationFactor(UserVF);
6547     buildVPlansWithVPRecipes(UserVF, UserVF);
6548     LLVM_DEBUG(printPlans(dbgs()));
6549     return {{UserVF, 0}};
6550   }
6551 
6552   unsigned MaxVF = MaybeMaxVF.getValue();
6553   assert(MaxVF != 0 && "MaxVF is zero.");
6554 
6555   for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
6556     // Collect Uniform and Scalar instructions after vectorization with VF.
6557     CM.collectUniformsAndScalars(VF);
6558 
6559     // Collect the instructions (and their associated costs) that will be more
6560     // profitable to scalarize.
6561     if (VF > 1)
6562       CM.collectInstsToScalarize(VF);
6563   }
6564 
6565   buildVPlansWithVPRecipes(1, MaxVF);
6566   LLVM_DEBUG(printPlans(dbgs()));
6567   if (MaxVF == 1)
6568     return VectorizationFactor::Disabled();
6569 
6570   // Select the optimal vectorization factor.
6571   return CM.selectVectorizationFactor(MaxVF);
6572 }
6573 
6574 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
6575   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
6576                     << '\n');
6577   BestVF = VF;
6578   BestUF = UF;
6579 
6580   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
6581     return !Plan->hasVF(VF);
6582   });
6583   assert(VPlans.size() == 1 && "Best VF has not a single VPlan.");
6584 }
6585 
6586 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
6587                                            DominatorTree *DT) {
6588   // Perform the actual loop transformation.
6589 
6590   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
6591   VPCallbackILV CallbackILV(ILV);
6592 
6593   VPTransformState State{BestVF, BestUF,      LI,
6594                          DT,     ILV.Builder, ILV.VectorLoopValueMap,
6595                          &ILV,   CallbackILV};
6596   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
6597   State.TripCount = ILV.getOrCreateTripCount(nullptr);
6598   State.CanonicalIV = ILV.Induction;
6599 
6600   //===------------------------------------------------===//
6601   //
6602   // Notice: any optimization or new instruction that go
6603   // into the code below should also be implemented in
6604   // the cost-model.
6605   //
6606   //===------------------------------------------------===//
6607 
6608   // 2. Copy and widen instructions from the old loop into the new loop.
6609   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
6610   VPlans.front()->execute(&State);
6611 
6612   // 3. Fix the vectorized code: take care of header phi's, live-outs,
6613   //    predication, updating analyses.
6614   ILV.fixVectorizedLoop();
6615 }
6616 
6617 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
6618     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
6619   BasicBlock *Latch = OrigLoop->getLoopLatch();
6620 
6621   // We create new control-flow for the vectorized loop, so the original
6622   // condition will be dead after vectorization if it's only used by the
6623   // branch.
6624   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
6625   if (Cmp && Cmp->hasOneUse())
6626     DeadInstructions.insert(Cmp);
6627 
6628   // We create new "steps" for induction variable updates to which the original
6629   // induction variables map. An original update instruction will be dead if
6630   // all its users except the induction variable are dead.
6631   for (auto &Induction : Legal->getInductionVars()) {
6632     PHINode *Ind = Induction.first;
6633     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
6634     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
6635           return U == Ind || DeadInstructions.find(cast<Instruction>(U)) !=
6636                                  DeadInstructions.end();
6637         }))
6638       DeadInstructions.insert(IndUpdate);
6639 
6640     // We record as "Dead" also the type-casting instructions we had identified
6641     // during induction analysis. We don't need any handling for them in the
6642     // vectorized loop because we have proven that, under a proper runtime
6643     // test guarding the vectorized loop, the value of the phi, and the casted
6644     // value of the phi, are the same. The last instruction in this casting chain
6645     // will get its scalar/vector/widened def from the scalar/vector/widened def
6646     // of the respective phi node. Any other casts in the induction def-use chain
6647     // have no other uses outside the phi update chain, and will be ignored.
6648     InductionDescriptor &IndDes = Induction.second;
6649     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6650     DeadInstructions.insert(Casts.begin(), Casts.end());
6651   }
6652 }
6653 
6654 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
6655 
6656 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
6657 
6658 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
6659                                         Instruction::BinaryOps BinOp) {
6660   // When unrolling and the VF is 1, we only need to add a simple scalar.
6661   Type *Ty = Val->getType();
6662   assert(!Ty->isVectorTy() && "Val must be a scalar");
6663 
6664   if (Ty->isFloatingPointTy()) {
6665     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
6666 
6667     // Floating point operations had to be 'fast' to enable the unrolling.
6668     Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
6669     return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
6670   }
6671   Constant *C = ConstantInt::get(Ty, StartIdx);
6672   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
6673 }
6674 
6675 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
6676   SmallVector<Metadata *, 4> MDs;
6677   // Reserve first location for self reference to the LoopID metadata node.
6678   MDs.push_back(nullptr);
6679   bool IsUnrollMetadata = false;
6680   MDNode *LoopID = L->getLoopID();
6681   if (LoopID) {
6682     // First find existing loop unrolling disable metadata.
6683     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
6684       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
6685       if (MD) {
6686         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
6687         IsUnrollMetadata =
6688             S && S->getString().startswith("llvm.loop.unroll.disable");
6689       }
6690       MDs.push_back(LoopID->getOperand(i));
6691     }
6692   }
6693 
6694   if (!IsUnrollMetadata) {
6695     // Add runtime unroll disable metadata.
6696     LLVMContext &Context = L->getHeader()->getContext();
6697     SmallVector<Metadata *, 1> DisableOperands;
6698     DisableOperands.push_back(
6699         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
6700     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
6701     MDs.push_back(DisableNode);
6702     MDNode *NewLoopID = MDNode::get(Context, MDs);
6703     // Set operand 0 to refer to the loop id itself.
6704     NewLoopID->replaceOperandWith(0, NewLoopID);
6705     L->setLoopID(NewLoopID);
6706   }
6707 }
6708 
6709 bool LoopVectorizationPlanner::getDecisionAndClampRange(
6710     const std::function<bool(unsigned)> &Predicate, VFRange &Range) {
6711   assert(Range.End > Range.Start && "Trying to test an empty VF range.");
6712   bool PredicateAtRangeStart = Predicate(Range.Start);
6713 
6714   for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2)
6715     if (Predicate(TmpVF) != PredicateAtRangeStart) {
6716       Range.End = TmpVF;
6717       break;
6718     }
6719 
6720   return PredicateAtRangeStart;
6721 }
6722 
6723 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
6724 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
6725 /// of VF's starting at a given VF and extending it as much as possible. Each
6726 /// vectorization decision can potentially shorten this sub-range during
6727 /// buildVPlan().
6728 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) {
6729   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
6730     VFRange SubRange = {VF, MaxVF + 1};
6731     VPlans.push_back(buildVPlan(SubRange));
6732     VF = SubRange.End;
6733   }
6734 }
6735 
6736 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
6737                                          VPlanPtr &Plan) {
6738   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
6739 
6740   // Look for cached value.
6741   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
6742   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
6743   if (ECEntryIt != EdgeMaskCache.end())
6744     return ECEntryIt->second;
6745 
6746   VPValue *SrcMask = createBlockInMask(Src, Plan);
6747 
6748   // The terminator has to be a branch inst!
6749   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
6750   assert(BI && "Unexpected terminator found");
6751 
6752   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
6753     return EdgeMaskCache[Edge] = SrcMask;
6754 
6755   VPValue *EdgeMask = Plan->getVPValue(BI->getCondition());
6756   assert(EdgeMask && "No Edge Mask found for condition");
6757 
6758   if (BI->getSuccessor(0) != Dst)
6759     EdgeMask = Builder.createNot(EdgeMask);
6760 
6761   if (SrcMask) // Otherwise block in-mask is all-one, no need to AND.
6762     EdgeMask = Builder.createAnd(EdgeMask, SrcMask);
6763 
6764   return EdgeMaskCache[Edge] = EdgeMask;
6765 }
6766 
6767 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
6768   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
6769 
6770   // Look for cached value.
6771   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
6772   if (BCEntryIt != BlockMaskCache.end())
6773     return BCEntryIt->second;
6774 
6775   // All-one mask is modelled as no-mask following the convention for masked
6776   // load/store/gather/scatter. Initialize BlockMask to no-mask.
6777   VPValue *BlockMask = nullptr;
6778 
6779   if (OrigLoop->getHeader() == BB) {
6780     if (!CM.blockNeedsPredication(BB))
6781       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
6782 
6783     // Introduce the early-exit compare IV <= BTC to form header block mask.
6784     // This is used instead of IV < TC because TC may wrap, unlike BTC.
6785     // Start by constructing the desired canonical IV.
6786     VPValue *IV = nullptr;
6787     if (Legal->getPrimaryInduction())
6788       IV = Plan->getVPValue(Legal->getPrimaryInduction());
6789     else {
6790       auto IVRecipe = new VPWidenCanonicalIVRecipe();
6791       Builder.getInsertBlock()->appendRecipe(IVRecipe);
6792       IV = IVRecipe->getVPValue();
6793     }
6794     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
6795     BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
6796     return BlockMaskCache[BB] = BlockMask;
6797   }
6798 
6799   // This is the block mask. We OR all incoming edges.
6800   for (auto *Predecessor : predecessors(BB)) {
6801     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
6802     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
6803       return BlockMaskCache[BB] = EdgeMask;
6804 
6805     if (!BlockMask) { // BlockMask has its initialized nullptr value.
6806       BlockMask = EdgeMask;
6807       continue;
6808     }
6809 
6810     BlockMask = Builder.createOr(BlockMask, EdgeMask);
6811   }
6812 
6813   return BlockMaskCache[BB] = BlockMask;
6814 }
6815 
6816 VPWidenMemoryInstructionRecipe *
6817 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range,
6818                                   VPlanPtr &Plan) {
6819   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
6820          "Must be called with either a load or store");
6821 
6822   auto willWiden = [&](unsigned VF) -> bool {
6823     if (VF == 1)
6824       return false;
6825     LoopVectorizationCostModel::InstWidening Decision =
6826         CM.getWideningDecision(I, VF);
6827     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
6828            "CM decision should be taken at this point.");
6829     if (Decision == LoopVectorizationCostModel::CM_Interleave)
6830       return true;
6831     if (CM.isScalarAfterVectorization(I, VF) ||
6832         CM.isProfitableToScalarize(I, VF))
6833       return false;
6834     return Decision != LoopVectorizationCostModel::CM_Scalarize;
6835   };
6836 
6837   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
6838     return nullptr;
6839 
6840   VPValue *Mask = nullptr;
6841   if (Legal->isMaskRequired(I))
6842     Mask = createBlockInMask(I->getParent(), Plan);
6843 
6844   VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I));
6845   if (LoadInst *Load = dyn_cast<LoadInst>(I))
6846     return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask);
6847 
6848   StoreInst *Store = cast<StoreInst>(I);
6849   VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand());
6850   return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask);
6851 }
6852 
6853 VPWidenIntOrFpInductionRecipe *
6854 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi) const {
6855   // Check if this is an integer or fp induction. If so, build the recipe that
6856   // produces its scalar and vector values.
6857   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
6858   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
6859       II.getKind() == InductionDescriptor::IK_FpInduction)
6860     return new VPWidenIntOrFpInductionRecipe(Phi);
6861 
6862   return nullptr;
6863 }
6864 
6865 VPWidenIntOrFpInductionRecipe *
6866 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I,
6867                                                 VFRange &Range) const {
6868   // Optimize the special case where the source is a constant integer
6869   // induction variable. Notice that we can only optimize the 'trunc' case
6870   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
6871   // (c) other casts depend on pointer size.
6872 
6873   // Determine whether \p K is a truncation based on an induction variable that
6874   // can be optimized.
6875   auto isOptimizableIVTruncate =
6876       [&](Instruction *K) -> std::function<bool(unsigned)> {
6877     return
6878         [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); };
6879   };
6880 
6881   if (LoopVectorizationPlanner::getDecisionAndClampRange(
6882           isOptimizableIVTruncate(I), Range))
6883     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
6884                                              I);
6885   return nullptr;
6886 }
6887 
6888 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) {
6889   // We know that all PHIs in non-header blocks are converted into selects, so
6890   // we don't have to worry about the insertion order and we can just use the
6891   // builder. At this point we generate the predication tree. There may be
6892   // duplications since this is a simple recursive scan, but future
6893   // optimizations will clean it up.
6894 
6895   SmallVector<VPValue *, 2> Operands;
6896   unsigned NumIncoming = Phi->getNumIncomingValues();
6897   for (unsigned In = 0; In < NumIncoming; In++) {
6898     VPValue *EdgeMask =
6899       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
6900     assert((EdgeMask || NumIncoming == 1) &&
6901            "Multiple predecessors with one having a full mask");
6902     Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In)));
6903     if (EdgeMask)
6904       Operands.push_back(EdgeMask);
6905   }
6906   return new VPBlendRecipe(Phi, Operands);
6907 }
6908 
6909 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range,
6910                                                    VPlan &Plan) const {
6911 
6912   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
6913       [this, CI](unsigned VF) { return CM.isScalarWithPredication(CI, VF); },
6914       Range);
6915 
6916   if (IsPredicated)
6917     return nullptr;
6918 
6919   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6920   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
6921              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect))
6922     return nullptr;
6923 
6924   auto willWiden = [&](unsigned VF) -> bool {
6925     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6926     // The following case may be scalarized depending on the VF.
6927     // The flag shows whether we use Intrinsic or a usual Call for vectorized
6928     // version of the instruction.
6929     // Is it beneficial to perform intrinsic call compared to lib call?
6930     bool NeedToScalarize = false;
6931     unsigned CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
6932     bool UseVectorIntrinsic =
6933         ID && CM.getVectorIntrinsicCost(CI, VF) <= CallCost;
6934     return UseVectorIntrinsic || !NeedToScalarize;
6935   };
6936 
6937   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
6938     return nullptr;
6939 
6940   return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands()));
6941 }
6942 
6943 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
6944   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
6945          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
6946   // Instruction should be widened, unless it is scalar after vectorization,
6947   // scalarization is profitable or it is predicated.
6948   auto WillScalarize = [this, I](unsigned VF) -> bool {
6949     return CM.isScalarAfterVectorization(I, VF) ||
6950            CM.isProfitableToScalarize(I, VF) ||
6951            CM.isScalarWithPredication(I, VF);
6952   };
6953   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
6954                                                              Range);
6955 }
6956 
6957 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const {
6958   auto IsVectorizableOpcode = [](unsigned Opcode) {
6959     switch (Opcode) {
6960     case Instruction::Add:
6961     case Instruction::And:
6962     case Instruction::AShr:
6963     case Instruction::BitCast:
6964     case Instruction::FAdd:
6965     case Instruction::FCmp:
6966     case Instruction::FDiv:
6967     case Instruction::FMul:
6968     case Instruction::FNeg:
6969     case Instruction::FPExt:
6970     case Instruction::FPToSI:
6971     case Instruction::FPToUI:
6972     case Instruction::FPTrunc:
6973     case Instruction::FRem:
6974     case Instruction::FSub:
6975     case Instruction::ICmp:
6976     case Instruction::IntToPtr:
6977     case Instruction::LShr:
6978     case Instruction::Mul:
6979     case Instruction::Or:
6980     case Instruction::PtrToInt:
6981     case Instruction::SDiv:
6982     case Instruction::Select:
6983     case Instruction::SExt:
6984     case Instruction::Shl:
6985     case Instruction::SIToFP:
6986     case Instruction::SRem:
6987     case Instruction::Sub:
6988     case Instruction::Trunc:
6989     case Instruction::UDiv:
6990     case Instruction::UIToFP:
6991     case Instruction::URem:
6992     case Instruction::Xor:
6993     case Instruction::ZExt:
6994       return true;
6995     }
6996     return false;
6997   };
6998 
6999   if (!IsVectorizableOpcode(I->getOpcode()))
7000     return nullptr;
7001 
7002   // Success: widen this instruction.
7003   return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands()));
7004 }
7005 
7006 VPBasicBlock *VPRecipeBuilder::handleReplication(
7007     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
7008     DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
7009     VPlanPtr &Plan) {
7010   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
7011       [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); },
7012       Range);
7013 
7014   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
7015       [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range);
7016 
7017   auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated);
7018   setRecipe(I, Recipe);
7019 
7020   // Find if I uses a predicated instruction. If so, it will use its scalar
7021   // value. Avoid hoisting the insert-element which packs the scalar value into
7022   // a vector value, as that happens iff all users use the vector value.
7023   for (auto &Op : I->operands())
7024     if (auto *PredInst = dyn_cast<Instruction>(Op))
7025       if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
7026         PredInst2Recipe[PredInst]->setAlsoPack(false);
7027 
7028   // Finalize the recipe for Instr, first if it is not predicated.
7029   if (!IsPredicated) {
7030     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
7031     VPBB->appendRecipe(Recipe);
7032     return VPBB;
7033   }
7034   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
7035   assert(VPBB->getSuccessors().empty() &&
7036          "VPBB has successors when handling predicated replication.");
7037   // Record predicated instructions for above packing optimizations.
7038   PredInst2Recipe[I] = Recipe;
7039   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
7040   VPBlockUtils::insertBlockAfter(Region, VPBB);
7041   auto *RegSucc = new VPBasicBlock();
7042   VPBlockUtils::insertBlockAfter(RegSucc, Region);
7043   return RegSucc;
7044 }
7045 
7046 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
7047                                                       VPRecipeBase *PredRecipe,
7048                                                       VPlanPtr &Plan) {
7049   // Instructions marked for predication are replicated and placed under an
7050   // if-then construct to prevent side-effects.
7051 
7052   // Generate recipes to compute the block mask for this region.
7053   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
7054 
7055   // Build the triangular if-then region.
7056   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
7057   assert(Instr->getParent() && "Predicated instruction not in any basic block");
7058   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
7059   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
7060   auto *PHIRecipe =
7061       Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
7062   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
7063   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
7064   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
7065 
7066   // Note: first set Entry as region entry and then connect successors starting
7067   // from it in order, to propagate the "parent" of each VPBasicBlock.
7068   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
7069   VPBlockUtils::connectBlocks(Pred, Exit);
7070 
7071   return Region;
7072 }
7073 
7074 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
7075                                                       VFRange &Range,
7076                                                       VPlanPtr &Plan) {
7077   // First, check for specific widening recipes that deal with calls, memory
7078   // operations, inductions and Phi nodes.
7079   if (auto *CI = dyn_cast<CallInst>(Instr))
7080     return tryToWidenCall(CI, Range, *Plan);
7081 
7082   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
7083     return tryToWidenMemory(Instr, Range, Plan);
7084 
7085   VPRecipeBase *Recipe;
7086   if (auto Phi = dyn_cast<PHINode>(Instr)) {
7087     if (Phi->getParent() != OrigLoop->getHeader())
7088       return tryToBlend(Phi, Plan);
7089     if ((Recipe = tryToOptimizeInductionPHI(Phi)))
7090       return Recipe;
7091     return new VPWidenPHIRecipe(Phi);
7092     return new VPWidenPHIRecipe(Phi);
7093   }
7094 
7095   if (isa<TruncInst>(Instr) &&
7096       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Range)))
7097     return Recipe;
7098 
7099   if (!shouldWiden(Instr, Range))
7100     return nullptr;
7101 
7102   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
7103     return new VPWidenGEPRecipe(GEP, OrigLoop);
7104 
7105   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
7106     bool InvariantCond =
7107         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
7108     return new VPWidenSelectRecipe(*SI, InvariantCond);
7109   }
7110 
7111   return tryToWiden(Instr, *Plan);
7112 }
7113 
7114 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF,
7115                                                         unsigned MaxVF) {
7116   assert(OrigLoop->empty() && "Inner loop expected.");
7117 
7118   // Collect conditions feeding internal conditional branches; they need to be
7119   // represented in VPlan for it to model masking.
7120   SmallPtrSet<Value *, 1> NeedDef;
7121 
7122   auto *Latch = OrigLoop->getLoopLatch();
7123   for (BasicBlock *BB : OrigLoop->blocks()) {
7124     if (BB == Latch)
7125       continue;
7126     BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
7127     if (Branch && Branch->isConditional())
7128       NeedDef.insert(Branch->getCondition());
7129   }
7130 
7131   // If the tail is to be folded by masking, the primary induction variable, if
7132   // exists needs to be represented in VPlan for it to model early-exit masking.
7133   // Also, both the Phi and the live-out instruction of each reduction are
7134   // required in order to introduce a select between them in VPlan.
7135   if (CM.foldTailByMasking()) {
7136     if (Legal->getPrimaryInduction())
7137       NeedDef.insert(Legal->getPrimaryInduction());
7138     for (auto &Reduction : Legal->getReductionVars()) {
7139       NeedDef.insert(Reduction.first);
7140       NeedDef.insert(Reduction.second.getLoopExitInstr());
7141     }
7142   }
7143 
7144   // Collect instructions from the original loop that will become trivially dead
7145   // in the vectorized loop. We don't need to vectorize these instructions. For
7146   // example, original induction update instructions can become dead because we
7147   // separately emit induction "steps" when generating code for the new loop.
7148   // Similarly, we create a new latch condition when setting up the structure
7149   // of the new loop, so the old one can become dead.
7150   SmallPtrSet<Instruction *, 4> DeadInstructions;
7151   collectTriviallyDeadInstructions(DeadInstructions);
7152 
7153   // Add assume instructions we need to drop to DeadInstructions, to prevent
7154   // them from being added to the VPlan.
7155   // TODO: We only need to drop assumes in blocks that get flattend. If the
7156   // control flow is preserved, we should keep them.
7157   auto &ConditionalAssumes = Legal->getConditionalAssumes();
7158   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
7159 
7160   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
7161   // Dead instructions do not need sinking. Remove them from SinkAfter.
7162   for (Instruction *I : DeadInstructions)
7163     SinkAfter.erase(I);
7164 
7165   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
7166     VFRange SubRange = {VF, MaxVF + 1};
7167     VPlans.push_back(buildVPlanWithVPRecipes(SubRange, NeedDef,
7168                                              DeadInstructions, SinkAfter));
7169     VF = SubRange.End;
7170   }
7171 }
7172 
7173 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
7174     VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef,
7175     SmallPtrSetImpl<Instruction *> &DeadInstructions,
7176     const DenseMap<Instruction *, Instruction *> &SinkAfter) {
7177 
7178   // Hold a mapping from predicated instructions to their recipes, in order to
7179   // fix their AlsoPack behavior if a user is determined to replicate and use a
7180   // scalar instead of vector value.
7181   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
7182 
7183   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
7184 
7185   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
7186 
7187   // ---------------------------------------------------------------------------
7188   // Pre-construction: record ingredients whose recipes we'll need to further
7189   // process after constructing the initial VPlan.
7190   // ---------------------------------------------------------------------------
7191 
7192   // Mark instructions we'll need to sink later and their targets as
7193   // ingredients whose recipe we'll need to record.
7194   for (auto &Entry : SinkAfter) {
7195     RecipeBuilder.recordRecipeOf(Entry.first);
7196     RecipeBuilder.recordRecipeOf(Entry.second);
7197   }
7198 
7199   // For each interleave group which is relevant for this (possibly trimmed)
7200   // Range, add it to the set of groups to be later applied to the VPlan and add
7201   // placeholders for its members' Recipes which we'll be replacing with a
7202   // single VPInterleaveRecipe.
7203   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
7204     auto applyIG = [IG, this](unsigned VF) -> bool {
7205       return (VF >= 2 && // Query is illegal for VF == 1
7206               CM.getWideningDecision(IG->getInsertPos(), VF) ==
7207                   LoopVectorizationCostModel::CM_Interleave);
7208     };
7209     if (!getDecisionAndClampRange(applyIG, Range))
7210       continue;
7211     InterleaveGroups.insert(IG);
7212     for (unsigned i = 0; i < IG->getFactor(); i++)
7213       if (Instruction *Member = IG->getMember(i))
7214         RecipeBuilder.recordRecipeOf(Member);
7215   };
7216 
7217   // ---------------------------------------------------------------------------
7218   // Build initial VPlan: Scan the body of the loop in a topological order to
7219   // visit each basic block after having visited its predecessor basic blocks.
7220   // ---------------------------------------------------------------------------
7221 
7222   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
7223   auto Plan = std::make_unique<VPlan>();
7224   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
7225   Plan->setEntry(VPBB);
7226 
7227   // Represent values that will have defs inside VPlan.
7228   for (Value *V : NeedDef)
7229     Plan->addVPValue(V);
7230 
7231   // Scan the body of the loop in a topological order to visit each basic block
7232   // after having visited its predecessor basic blocks.
7233   LoopBlocksDFS DFS(OrigLoop);
7234   DFS.perform(LI);
7235 
7236   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
7237     // Relevant instructions from basic block BB will be grouped into VPRecipe
7238     // ingredients and fill a new VPBasicBlock.
7239     unsigned VPBBsForBB = 0;
7240     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
7241     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
7242     VPBB = FirstVPBBForBB;
7243     Builder.setInsertPoint(VPBB);
7244 
7245     // Introduce each ingredient into VPlan.
7246     // TODO: Model and preserve debug instrinsics in VPlan.
7247     for (Instruction &I : BB->instructionsWithoutDebug()) {
7248       Instruction *Instr = &I;
7249 
7250       // First filter out irrelevant instructions, to ensure no recipes are
7251       // built for them.
7252       if (isa<BranchInst>(Instr) ||
7253           DeadInstructions.find(Instr) != DeadInstructions.end())
7254         continue;
7255 
7256       if (auto Recipe =
7257               RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
7258         RecipeBuilder.setRecipe(Instr, Recipe);
7259         VPBB->appendRecipe(Recipe);
7260         continue;
7261       }
7262 
7263       // Otherwise, if all widening options failed, Instruction is to be
7264       // replicated. This may create a successor for VPBB.
7265       VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
7266           Instr, Range, VPBB, PredInst2Recipe, Plan);
7267       if (NextVPBB != VPBB) {
7268         VPBB = NextVPBB;
7269         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
7270                                     : "");
7271       }
7272     }
7273   }
7274 
7275   // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
7276   // may also be empty, such as the last one VPBB, reflecting original
7277   // basic-blocks with no recipes.
7278   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
7279   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
7280   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
7281   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
7282   delete PreEntry;
7283 
7284   // ---------------------------------------------------------------------------
7285   // Transform initial VPlan: Apply previously taken decisions, in order, to
7286   // bring the VPlan to its final state.
7287   // ---------------------------------------------------------------------------
7288 
7289   // Apply Sink-After legal constraints.
7290   for (auto &Entry : SinkAfter) {
7291     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
7292     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
7293     Sink->moveAfter(Target);
7294   }
7295 
7296   // Interleave memory: for each Interleave Group we marked earlier as relevant
7297   // for this VPlan, replace the Recipes widening its memory instructions with a
7298   // single VPInterleaveRecipe at its insertion point.
7299   for (auto IG : InterleaveGroups) {
7300     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
7301         RecipeBuilder.getRecipe(IG->getInsertPos()));
7302     (new VPInterleaveRecipe(IG, Recipe->getAddr(), Recipe->getMask()))
7303         ->insertBefore(Recipe);
7304 
7305     for (unsigned i = 0; i < IG->getFactor(); ++i)
7306       if (Instruction *Member = IG->getMember(i)) {
7307         RecipeBuilder.getRecipe(Member)->eraseFromParent();
7308       }
7309   }
7310 
7311   // Finally, if tail is folded by masking, introduce selects between the phi
7312   // and the live-out instruction of each reduction, at the end of the latch.
7313   if (CM.foldTailByMasking()) {
7314     Builder.setInsertPoint(VPBB);
7315     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
7316     for (auto &Reduction : Legal->getReductionVars()) {
7317       VPValue *Phi = Plan->getVPValue(Reduction.first);
7318       VPValue *Red = Plan->getVPValue(Reduction.second.getLoopExitInstr());
7319       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
7320     }
7321   }
7322 
7323   std::string PlanName;
7324   raw_string_ostream RSO(PlanName);
7325   unsigned VF = Range.Start;
7326   Plan->addVF(VF);
7327   RSO << "Initial VPlan for VF={" << VF;
7328   for (VF *= 2; VF < Range.End; VF *= 2) {
7329     Plan->addVF(VF);
7330     RSO << "," << VF;
7331   }
7332   RSO << "},UF>=1";
7333   RSO.flush();
7334   Plan->setName(PlanName);
7335 
7336   return Plan;
7337 }
7338 
7339 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
7340   // Outer loop handling: They may require CFG and instruction level
7341   // transformations before even evaluating whether vectorization is profitable.
7342   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7343   // the vectorization pipeline.
7344   assert(!OrigLoop->empty());
7345   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7346 
7347   // Create new empty VPlan
7348   auto Plan = std::make_unique<VPlan>();
7349 
7350   // Build hierarchical CFG
7351   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
7352   HCFGBuilder.buildHierarchicalCFG();
7353 
7354   for (unsigned VF = Range.Start; VF < Range.End; VF *= 2)
7355     Plan->addVF(VF);
7356 
7357   if (EnableVPlanPredication) {
7358     VPlanPredicator VPP(*Plan);
7359     VPP.predicate();
7360 
7361     // Avoid running transformation to recipes until masked code generation in
7362     // VPlan-native path is in place.
7363     return Plan;
7364   }
7365 
7366   SmallPtrSet<Instruction *, 1> DeadInstructions;
7367   VPlanTransforms::VPInstructionsToVPRecipes(
7368       OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
7369   return Plan;
7370 }
7371 
7372 Value* LoopVectorizationPlanner::VPCallbackILV::
7373 getOrCreateVectorValues(Value *V, unsigned Part) {
7374       return ILV.getOrCreateVectorValue(V, Part);
7375 }
7376 
7377 Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue(
7378     Value *V, const VPIteration &Instance) {
7379   return ILV.getOrCreateScalarValue(V, Instance);
7380 }
7381 
7382 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
7383                                VPSlotTracker &SlotTracker) const {
7384   O << " +\n"
7385     << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
7386   IG->getInsertPos()->printAsOperand(O, false);
7387   O << ", ";
7388   getAddr()->printAsOperand(O, SlotTracker);
7389   VPValue *Mask = getMask();
7390   if (Mask) {
7391     O << ", ";
7392     Mask->printAsOperand(O, SlotTracker);
7393   }
7394   O << "\\l\"";
7395   for (unsigned i = 0; i < IG->getFactor(); ++i)
7396     if (Instruction *I = IG->getMember(i))
7397       O << " +\n"
7398         << Indent << "\"  " << VPlanIngredient(I) << " " << i << "\\l\"";
7399 }
7400 
7401 void VPWidenCallRecipe::execute(VPTransformState &State) {
7402   State.ILV->widenCallInstruction(Ingredient, User, State);
7403 }
7404 
7405 void VPWidenSelectRecipe::execute(VPTransformState &State) {
7406   State.ILV->widenSelectInstruction(Ingredient, InvariantCond);
7407 }
7408 
7409 void VPWidenRecipe::execute(VPTransformState &State) {
7410   State.ILV->widenInstruction(Ingredient, User, State);
7411 }
7412 
7413 void VPWidenGEPRecipe::execute(VPTransformState &State) {
7414   State.ILV->widenGEP(GEP, State.UF, State.VF, IsPtrLoopInvariant,
7415                       IsIndexLoopInvariant);
7416 }
7417 
7418 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
7419   assert(!State.Instance && "Int or FP induction being replicated.");
7420   State.ILV->widenIntOrFpInduction(IV, Trunc);
7421 }
7422 
7423 void VPWidenPHIRecipe::execute(VPTransformState &State) {
7424   State.ILV->widenPHIInstruction(Phi, State.UF, State.VF);
7425 }
7426 
7427 void VPBlendRecipe::execute(VPTransformState &State) {
7428   State.ILV->setDebugLocFromInst(State.Builder, Phi);
7429   // We know that all PHIs in non-header blocks are converted into
7430   // selects, so we don't have to worry about the insertion order and we
7431   // can just use the builder.
7432   // At this point we generate the predication tree. There may be
7433   // duplications since this is a simple recursive scan, but future
7434   // optimizations will clean it up.
7435 
7436   unsigned NumIncoming = getNumIncomingValues();
7437 
7438   // Generate a sequence of selects of the form:
7439   // SELECT(Mask3, In3,
7440   //        SELECT(Mask2, In2,
7441   //               SELECT(Mask1, In1,
7442   //                      In0)))
7443   // Note that Mask0 is never used: lanes for which no path reaches this phi and
7444   // are essentially undef are taken from In0.
7445   InnerLoopVectorizer::VectorParts Entry(State.UF);
7446   for (unsigned In = 0; In < NumIncoming; ++In) {
7447     for (unsigned Part = 0; Part < State.UF; ++Part) {
7448       // We might have single edge PHIs (blocks) - use an identity
7449       // 'select' for the first PHI operand.
7450       Value *In0 = State.get(getIncomingValue(In), Part);
7451       if (In == 0)
7452         Entry[Part] = In0; // Initialize with the first incoming value.
7453       else {
7454         // Select between the current value and the previous incoming edge
7455         // based on the incoming mask.
7456         Value *Cond = State.get(getMask(In), Part);
7457         Entry[Part] =
7458             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
7459       }
7460     }
7461   }
7462   for (unsigned Part = 0; Part < State.UF; ++Part)
7463     State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
7464 }
7465 
7466 void VPInterleaveRecipe::execute(VPTransformState &State) {
7467   assert(!State.Instance && "Interleave group being replicated.");
7468   State.ILV->vectorizeInterleaveGroup(IG, State, getAddr(), getMask());
7469 }
7470 
7471 void VPReplicateRecipe::execute(VPTransformState &State) {
7472   if (State.Instance) { // Generate a single instance.
7473     State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
7474     // Insert scalar instance packing it into a vector.
7475     if (AlsoPack && State.VF > 1) {
7476       // If we're constructing lane 0, initialize to start from undef.
7477       if (State.Instance->Lane == 0) {
7478         Value *Undef =
7479             UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
7480         State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
7481       }
7482       State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
7483     }
7484     return;
7485   }
7486 
7487   // Generate scalar instances for all VF lanes of all UF parts, unless the
7488   // instruction is uniform inwhich case generate only the first lane for each
7489   // of the UF parts.
7490   unsigned EndLane = IsUniform ? 1 : State.VF;
7491   for (unsigned Part = 0; Part < State.UF; ++Part)
7492     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
7493       State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
7494 }
7495 
7496 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
7497   assert(State.Instance && "Branch on Mask works only on single instance.");
7498 
7499   unsigned Part = State.Instance->Part;
7500   unsigned Lane = State.Instance->Lane;
7501 
7502   Value *ConditionBit = nullptr;
7503   if (!User) // Block in mask is all-one.
7504     ConditionBit = State.Builder.getTrue();
7505   else {
7506     VPValue *BlockInMask = User->getOperand(0);
7507     ConditionBit = State.get(BlockInMask, Part);
7508     if (ConditionBit->getType()->isVectorTy())
7509       ConditionBit = State.Builder.CreateExtractElement(
7510           ConditionBit, State.Builder.getInt32(Lane));
7511   }
7512 
7513   // Replace the temporary unreachable terminator with a new conditional branch,
7514   // whose two destinations will be set later when they are created.
7515   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
7516   assert(isa<UnreachableInst>(CurrentTerminator) &&
7517          "Expected to replace unreachable terminator with conditional branch.");
7518   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
7519   CondBr->setSuccessor(0, nullptr);
7520   ReplaceInstWithInst(CurrentTerminator, CondBr);
7521 }
7522 
7523 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
7524   assert(State.Instance && "Predicated instruction PHI works per instance.");
7525   Instruction *ScalarPredInst = cast<Instruction>(
7526       State.ValueMap.getScalarValue(PredInst, *State.Instance));
7527   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
7528   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
7529   assert(PredicatingBB && "Predicated block has no single predecessor.");
7530 
7531   // By current pack/unpack logic we need to generate only a single phi node: if
7532   // a vector value for the predicated instruction exists at this point it means
7533   // the instruction has vector users only, and a phi for the vector value is
7534   // needed. In this case the recipe of the predicated instruction is marked to
7535   // also do that packing, thereby "hoisting" the insert-element sequence.
7536   // Otherwise, a phi node for the scalar value is needed.
7537   unsigned Part = State.Instance->Part;
7538   if (State.ValueMap.hasVectorValue(PredInst, Part)) {
7539     Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
7540     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
7541     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
7542     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
7543     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
7544     State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
7545   } else {
7546     Type *PredInstType = PredInst->getType();
7547     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
7548     Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
7549     Phi->addIncoming(ScalarPredInst, PredicatedBB);
7550     State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
7551   }
7552 }
7553 
7554 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
7555   VPValue *StoredValue = isa<StoreInst>(Instr) ? getStoredValue() : nullptr;
7556   State.ILV->vectorizeMemoryInstruction(&Instr, State, getAddr(), StoredValue,
7557                                         getMask());
7558 }
7559 
7560 // Determine how to lower the scalar epilogue, which depends on 1) optimising
7561 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
7562 // predication, and 4) a TTI hook that analyses whether the loop is suitable
7563 // for predication.
7564 static ScalarEpilogueLowering getScalarEpilogueLowering(
7565     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
7566     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
7567     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
7568     LoopVectorizationLegality &LVL) {
7569   bool OptSize =
7570       F->hasOptSize() || llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
7571                                                      PGSOQueryType::IRPass);
7572   // 1) OptSize takes precedence over all other options, i.e. if this is set,
7573   // don't look at hints or options, and don't request a scalar epilogue.
7574   if (OptSize && Hints.getForce() != LoopVectorizeHints::FK_Enabled)
7575     return CM_ScalarEpilogueNotAllowedOptSize;
7576 
7577   bool PredicateOptDisabled = PreferPredicateOverEpilog.getNumOccurrences() &&
7578                               !PreferPredicateOverEpilog;
7579 
7580   // 2) Next, if disabling predication is requested on the command line, honour
7581   // this and request a scalar epilogue.
7582   if (PredicateOptDisabled)
7583     return CM_ScalarEpilogueAllowed;
7584 
7585   // 3) and 4) look if enabling predication is requested on the command line,
7586   // with a loop hint, or if the TTI hook indicates this is profitable, request
7587   // predication .
7588   if (PreferPredicateOverEpilog ||
7589       Hints.getPredicate() == LoopVectorizeHints::FK_Enabled ||
7590       (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
7591                                         LVL.getLAI()) &&
7592        Hints.getPredicate() != LoopVectorizeHints::FK_Disabled))
7593     return CM_ScalarEpilogueNotNeededUsePredicate;
7594 
7595   return CM_ScalarEpilogueAllowed;
7596 }
7597 
7598 // Process the loop in the VPlan-native vectorization path. This path builds
7599 // VPlan upfront in the vectorization pipeline, which allows to apply
7600 // VPlan-to-VPlan transformations from the very beginning without modifying the
7601 // input LLVM IR.
7602 static bool processLoopInVPlanNativePath(
7603     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
7604     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
7605     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
7606     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
7607     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {
7608 
7609   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
7610   Function *F = L->getHeader()->getParent();
7611   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
7612 
7613   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
7614       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
7615 
7616   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
7617                                 &Hints, IAI);
7618   // Use the planner for outer loop vectorization.
7619   // TODO: CM is not used at this point inside the planner. Turn CM into an
7620   // optional argument if we don't need it in the future.
7621   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE);
7622 
7623   // Get user vectorization factor.
7624   const unsigned UserVF = Hints.getWidth();
7625 
7626   // Plan how to best vectorize, return the best VF and its cost.
7627   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
7628 
7629   // If we are stress testing VPlan builds, do not attempt to generate vector
7630   // code. Masked vector code generation support will follow soon.
7631   // Also, do not attempt to vectorize if no vector code will be produced.
7632   if (VPlanBuildStressTest || EnableVPlanPredication ||
7633       VectorizationFactor::Disabled() == VF)
7634     return false;
7635 
7636   LVP.setBestPlan(VF.Width, 1);
7637 
7638   InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
7639                          &CM);
7640   LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
7641                     << L->getHeader()->getParent()->getName() << "\"\n");
7642   LVP.executePlan(LB, DT);
7643 
7644   // Mark the loop as already vectorized to avoid vectorizing again.
7645   Hints.setAlreadyVectorized();
7646 
7647   LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
7648   return true;
7649 }
7650 
7651 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
7652     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
7653                                !EnableLoopInterleaving),
7654       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
7655                               !EnableLoopVectorization) {}
7656 
7657 bool LoopVectorizePass::processLoop(Loop *L) {
7658   assert((EnableVPlanNativePath || L->empty()) &&
7659          "VPlan-native path is not enabled. Only process inner loops.");
7660 
7661 #ifndef NDEBUG
7662   const std::string DebugLocStr = getDebugLocString(L);
7663 #endif /* NDEBUG */
7664 
7665   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
7666                     << L->getHeader()->getParent()->getName() << "\" from "
7667                     << DebugLocStr << "\n");
7668 
7669   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
7670 
7671   LLVM_DEBUG(
7672       dbgs() << "LV: Loop hints:"
7673              << " force="
7674              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
7675                      ? "disabled"
7676                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
7677                             ? "enabled"
7678                             : "?"))
7679              << " width=" << Hints.getWidth()
7680              << " unroll=" << Hints.getInterleave() << "\n");
7681 
7682   // Function containing loop
7683   Function *F = L->getHeader()->getParent();
7684 
7685   // Looking at the diagnostic output is the only way to determine if a loop
7686   // was vectorized (other than looking at the IR or machine code), so it
7687   // is important to generate an optimization remark for each loop. Most of
7688   // these messages are generated as OptimizationRemarkAnalysis. Remarks
7689   // generated as OptimizationRemark and OptimizationRemarkMissed are
7690   // less verbose reporting vectorized loops and unvectorized loops that may
7691   // benefit from vectorization, respectively.
7692 
7693   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
7694     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
7695     return false;
7696   }
7697 
7698   PredicatedScalarEvolution PSE(*SE, *L);
7699 
7700   // Check if it is legal to vectorize the loop.
7701   LoopVectorizationRequirements Requirements(*ORE);
7702   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
7703                                 &Requirements, &Hints, DB, AC);
7704   if (!LVL.canVectorize(EnableVPlanNativePath)) {
7705     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
7706     Hints.emitRemarkWithHints();
7707     return false;
7708   }
7709 
7710   // Check the function attributes and profiles to find out if this function
7711   // should be optimized for size.
7712   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
7713       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
7714 
7715   // Entrance to the VPlan-native vectorization path. Outer loops are processed
7716   // here. They may require CFG and instruction level transformations before
7717   // even evaluating whether vectorization is profitable. Since we cannot modify
7718   // the incoming IR, we need to build VPlan upfront in the vectorization
7719   // pipeline.
7720   if (!L->empty())
7721     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
7722                                         ORE, BFI, PSI, Hints);
7723 
7724   assert(L->empty() && "Inner loop expected.");
7725 
7726   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
7727   // count by optimizing for size, to minimize overheads.
7728   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
7729   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
7730     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
7731                       << "This loop is worth vectorizing only if no scalar "
7732                       << "iteration overheads are incurred.");
7733     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
7734       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
7735     else {
7736       LLVM_DEBUG(dbgs() << "\n");
7737       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
7738     }
7739   }
7740 
7741   // Check the function attributes to see if implicit floats are allowed.
7742   // FIXME: This check doesn't seem possibly correct -- what if the loop is
7743   // an integer loop and the vector instructions selected are purely integer
7744   // vector instructions?
7745   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
7746     reportVectorizationFailure(
7747         "Can't vectorize when the NoImplicitFloat attribute is used",
7748         "loop not vectorized due to NoImplicitFloat attribute",
7749         "NoImplicitFloat", ORE, L);
7750     Hints.emitRemarkWithHints();
7751     return false;
7752   }
7753 
7754   // Check if the target supports potentially unsafe FP vectorization.
7755   // FIXME: Add a check for the type of safety issue (denormal, signaling)
7756   // for the target we're vectorizing for, to make sure none of the
7757   // additional fp-math flags can help.
7758   if (Hints.isPotentiallyUnsafe() &&
7759       TTI->isFPVectorizationPotentiallyUnsafe()) {
7760     reportVectorizationFailure(
7761         "Potentially unsafe FP op prevents vectorization",
7762         "loop not vectorized due to unsafe FP support.",
7763         "UnsafeFP", ORE, L);
7764     Hints.emitRemarkWithHints();
7765     return false;
7766   }
7767 
7768   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
7769   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
7770 
7771   // If an override option has been passed in for interleaved accesses, use it.
7772   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
7773     UseInterleaved = EnableInterleavedMemAccesses;
7774 
7775   // Analyze interleaved memory accesses.
7776   if (UseInterleaved) {
7777     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
7778   }
7779 
7780   // Use the cost model.
7781   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
7782                                 F, &Hints, IAI);
7783   CM.collectValuesToIgnore();
7784 
7785   // Use the planner for vectorization.
7786   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);
7787 
7788   // Get user vectorization factor.
7789   unsigned UserVF = Hints.getWidth();
7790 
7791   // Plan how to best vectorize, return the best VF and its cost.
7792   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF);
7793 
7794   VectorizationFactor VF = VectorizationFactor::Disabled();
7795   unsigned IC = 1;
7796   unsigned UserIC = Hints.getInterleave();
7797 
7798   if (MaybeVF) {
7799     VF = *MaybeVF;
7800     // Select the interleave count.
7801     IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
7802   }
7803 
7804   // Identify the diagnostic messages that should be produced.
7805   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
7806   bool VectorizeLoop = true, InterleaveLoop = true;
7807   if (Requirements.doesNotMeet(F, L, Hints)) {
7808     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
7809                          "requirements.\n");
7810     Hints.emitRemarkWithHints();
7811     return false;
7812   }
7813 
7814   if (VF.Width == 1) {
7815     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
7816     VecDiagMsg = std::make_pair(
7817         "VectorizationNotBeneficial",
7818         "the cost-model indicates that vectorization is not beneficial");
7819     VectorizeLoop = false;
7820   }
7821 
7822   if (!MaybeVF && UserIC > 1) {
7823     // Tell the user interleaving was avoided up-front, despite being explicitly
7824     // requested.
7825     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
7826                          "interleaving should be avoided up front\n");
7827     IntDiagMsg = std::make_pair(
7828         "InterleavingAvoided",
7829         "Ignoring UserIC, because interleaving was avoided up front");
7830     InterleaveLoop = false;
7831   } else if (IC == 1 && UserIC <= 1) {
7832     // Tell the user interleaving is not beneficial.
7833     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
7834     IntDiagMsg = std::make_pair(
7835         "InterleavingNotBeneficial",
7836         "the cost-model indicates that interleaving is not beneficial");
7837     InterleaveLoop = false;
7838     if (UserIC == 1) {
7839       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
7840       IntDiagMsg.second +=
7841           " and is explicitly disabled or interleave count is set to 1";
7842     }
7843   } else if (IC > 1 && UserIC == 1) {
7844     // Tell the user interleaving is beneficial, but it explicitly disabled.
7845     LLVM_DEBUG(
7846         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
7847     IntDiagMsg = std::make_pair(
7848         "InterleavingBeneficialButDisabled",
7849         "the cost-model indicates that interleaving is beneficial "
7850         "but is explicitly disabled or interleave count is set to 1");
7851     InterleaveLoop = false;
7852   }
7853 
7854   // Override IC if user provided an interleave count.
7855   IC = UserIC > 0 ? UserIC : IC;
7856 
7857   // Emit diagnostic messages, if any.
7858   const char *VAPassName = Hints.vectorizeAnalysisPassName();
7859   if (!VectorizeLoop && !InterleaveLoop) {
7860     // Do not vectorize or interleaving the loop.
7861     ORE->emit([&]() {
7862       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
7863                                       L->getStartLoc(), L->getHeader())
7864              << VecDiagMsg.second;
7865     });
7866     ORE->emit([&]() {
7867       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
7868                                       L->getStartLoc(), L->getHeader())
7869              << IntDiagMsg.second;
7870     });
7871     return false;
7872   } else if (!VectorizeLoop && InterleaveLoop) {
7873     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
7874     ORE->emit([&]() {
7875       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
7876                                         L->getStartLoc(), L->getHeader())
7877              << VecDiagMsg.second;
7878     });
7879   } else if (VectorizeLoop && !InterleaveLoop) {
7880     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
7881                       << ") in " << DebugLocStr << '\n');
7882     ORE->emit([&]() {
7883       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
7884                                         L->getStartLoc(), L->getHeader())
7885              << IntDiagMsg.second;
7886     });
7887   } else if (VectorizeLoop && InterleaveLoop) {
7888     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
7889                       << ") in " << DebugLocStr << '\n');
7890     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
7891   }
7892 
7893   LVP.setBestPlan(VF.Width, IC);
7894 
7895   using namespace ore;
7896   bool DisableRuntimeUnroll = false;
7897   MDNode *OrigLoopID = L->getLoopID();
7898 
7899   if (!VectorizeLoop) {
7900     assert(IC > 1 && "interleave count should not be 1 or 0");
7901     // If we decided that it is not legal to vectorize the loop, then
7902     // interleave it.
7903     InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
7904                                &CM);
7905     LVP.executePlan(Unroller, DT);
7906 
7907     ORE->emit([&]() {
7908       return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
7909                                 L->getHeader())
7910              << "interleaved loop (interleaved count: "
7911              << NV("InterleaveCount", IC) << ")";
7912     });
7913   } else {
7914     // If we decided that it is *legal* to vectorize the loop, then do it.
7915     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
7916                            &LVL, &CM);
7917     LVP.executePlan(LB, DT);
7918     ++LoopsVectorized;
7919 
7920     // Add metadata to disable runtime unrolling a scalar loop when there are
7921     // no runtime checks about strides and memory. A scalar loop that is
7922     // rarely used is not worth unrolling.
7923     if (!LB.areSafetyChecksAdded())
7924       DisableRuntimeUnroll = true;
7925 
7926     // Report the vectorization decision.
7927     ORE->emit([&]() {
7928       return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
7929                                 L->getHeader())
7930              << "vectorized loop (vectorization width: "
7931              << NV("VectorizationFactor", VF.Width)
7932              << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
7933     });
7934   }
7935 
7936   Optional<MDNode *> RemainderLoopID =
7937       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7938                                       LLVMLoopVectorizeFollowupEpilogue});
7939   if (RemainderLoopID.hasValue()) {
7940     L->setLoopID(RemainderLoopID.getValue());
7941   } else {
7942     if (DisableRuntimeUnroll)
7943       AddRuntimeUnrollDisableMetaData(L);
7944 
7945     // Mark the loop as already vectorized to avoid vectorizing again.
7946     Hints.setAlreadyVectorized();
7947   }
7948 
7949   LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
7950   return true;
7951 }
7952 
7953 LoopVectorizeResult LoopVectorizePass::runImpl(
7954     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
7955     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
7956     DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
7957     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
7958     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
7959   SE = &SE_;
7960   LI = &LI_;
7961   TTI = &TTI_;
7962   DT = &DT_;
7963   BFI = &BFI_;
7964   TLI = TLI_;
7965   AA = &AA_;
7966   AC = &AC_;
7967   GetLAA = &GetLAA_;
7968   DB = &DB_;
7969   ORE = &ORE_;
7970   PSI = PSI_;
7971 
7972   // Don't attempt if
7973   // 1. the target claims to have no vector registers, and
7974   // 2. interleaving won't help ILP.
7975   //
7976   // The second condition is necessary because, even if the target has no
7977   // vector registers, loop vectorization may still enable scalar
7978   // interleaving.
7979   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
7980       TTI->getMaxInterleaveFactor(1) < 2)
7981     return LoopVectorizeResult(false, false);
7982 
7983   bool Changed = false, CFGChanged = false;
7984 
7985   // The vectorizer requires loops to be in simplified form.
7986   // Since simplification may add new inner loops, it has to run before the
7987   // legality and profitability checks. This means running the loop vectorizer
7988   // will simplify all loops, regardless of whether anything end up being
7989   // vectorized.
7990   for (auto &L : *LI)
7991     Changed |= CFGChanged |=
7992         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
7993 
7994   // Build up a worklist of inner-loops to vectorize. This is necessary as
7995   // the act of vectorizing or partially unrolling a loop creates new loops
7996   // and can invalidate iterators across the loops.
7997   SmallVector<Loop *, 8> Worklist;
7998 
7999   for (Loop *L : *LI)
8000     collectSupportedLoops(*L, LI, ORE, Worklist);
8001 
8002   LoopsAnalyzed += Worklist.size();
8003 
8004   // Now walk the identified inner loops.
8005   while (!Worklist.empty()) {
8006     Loop *L = Worklist.pop_back_val();
8007 
8008     // For the inner loops we actually process, form LCSSA to simplify the
8009     // transform.
8010     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
8011 
8012     Changed |= CFGChanged |= processLoop(L);
8013   }
8014 
8015   // Process each loop nest in the function.
8016   return LoopVectorizeResult(Changed, CFGChanged);
8017 }
8018 
8019 PreservedAnalyses LoopVectorizePass::run(Function &F,
8020                                          FunctionAnalysisManager &AM) {
8021     auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
8022     auto &LI = AM.getResult<LoopAnalysis>(F);
8023     auto &TTI = AM.getResult<TargetIRAnalysis>(F);
8024     auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
8025     auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
8026     auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
8027     auto &AA = AM.getResult<AAManager>(F);
8028     auto &AC = AM.getResult<AssumptionAnalysis>(F);
8029     auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
8030     auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
8031     MemorySSA *MSSA = EnableMSSALoopDependency
8032                           ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
8033                           : nullptr;
8034 
8035     auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
8036     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
8037         [&](Loop &L) -> const LoopAccessInfo & {
8038       LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, MSSA};
8039       return LAM.getResult<LoopAccessAnalysis>(L, AR);
8040     };
8041     const ModuleAnalysisManager &MAM =
8042         AM.getResult<ModuleAnalysisManagerFunctionProxy>(F).getManager();
8043     ProfileSummaryInfo *PSI =
8044         MAM.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
8045     LoopVectorizeResult Result =
8046         runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
8047     if (!Result.MadeAnyChange)
8048       return PreservedAnalyses::all();
8049     PreservedAnalyses PA;
8050 
8051     // We currently do not preserve loopinfo/dominator analyses with outer loop
8052     // vectorization. Until this is addressed, mark these analyses as preserved
8053     // only for non-VPlan-native path.
8054     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
8055     if (!EnableVPlanNativePath) {
8056       PA.preserve<LoopAnalysis>();
8057       PA.preserve<DominatorTreeAnalysis>();
8058     }
8059     PA.preserve<BasicAA>();
8060     PA.preserve<GlobalsAA>();
8061     if (!Result.MadeCFGChange)
8062       PA.preserveSet<CFGAnalyses>();
8063     return PA;
8064 }
8065