1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
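//
// For example (illustrative pseudo-code only, not the exact IR this pass
// emits), a loop such as:
//
//   for (i = 0; i < n; ++i)
//     a[i] = b[i] + 1;
//
// is conceptually rewritten so that one 'wide' iteration handles VF elements:
//
//   for (i = 0; i + 3 < n; i += 4)        // vector body, VF = 4
//     a[i:i+3] = b[i:i+3] + <1, 1, 1, 1>;
//   for (; i < n; ++i)                    // scalar epilogue for the remainder
//     a[i] = b[i] + 1;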
17 //
18 // This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
28 // There is a development effort going on to migrate the loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD.
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SetVector.h"
73 #include "llvm/ADT/SmallPtrSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/MemorySSA.h"
91 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
92 #include "llvm/Analysis/ProfileSummaryInfo.h"
93 #include "llvm/Analysis/ScalarEvolution.h"
94 #include "llvm/Analysis/ScalarEvolutionExpander.h"
95 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
96 #include "llvm/Analysis/TargetLibraryInfo.h"
97 #include "llvm/Analysis/TargetTransformInfo.h"
98 #include "llvm/Analysis/VectorUtils.h"
99 #include "llvm/IR/Attributes.h"
100 #include "llvm/IR/BasicBlock.h"
101 #include "llvm/IR/CFG.h"
102 #include "llvm/IR/Constant.h"
103 #include "llvm/IR/Constants.h"
104 #include "llvm/IR/DataLayout.h"
105 #include "llvm/IR/DebugInfoMetadata.h"
106 #include "llvm/IR/DebugLoc.h"
107 #include "llvm/IR/DerivedTypes.h"
108 #include "llvm/IR/DiagnosticInfo.h"
109 #include "llvm/IR/Dominators.h"
110 #include "llvm/IR/Function.h"
111 #include "llvm/IR/IRBuilder.h"
112 #include "llvm/IR/InstrTypes.h"
113 #include "llvm/IR/Instruction.h"
114 #include "llvm/IR/Instructions.h"
115 #include "llvm/IR/IntrinsicInst.h"
116 #include "llvm/IR/Intrinsics.h"
117 #include "llvm/IR/LLVMContext.h"
118 #include "llvm/IR/Metadata.h"
119 #include "llvm/IR/Module.h"
120 #include "llvm/IR/Operator.h"
121 #include "llvm/IR/Type.h"
122 #include "llvm/IR/Use.h"
123 #include "llvm/IR/User.h"
124 #include "llvm/IR/Value.h"
125 #include "llvm/IR/ValueHandle.h"
126 #include "llvm/IR/Verifier.h"
127 #include "llvm/InitializePasses.h"
128 #include "llvm/Pass.h"
129 #include "llvm/Support/Casting.h"
130 #include "llvm/Support/CommandLine.h"
131 #include "llvm/Support/Compiler.h"
132 #include "llvm/Support/Debug.h"
133 #include "llvm/Support/ErrorHandling.h"
134 #include "llvm/Support/MathExtras.h"
135 #include "llvm/Support/raw_ostream.h"
136 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
137 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
138 #include "llvm/Transforms/Utils/LoopSimplify.h"
139 #include "llvm/Transforms/Utils/LoopUtils.h"
140 #include "llvm/Transforms/Utils/LoopVersioning.h"
141 #include "llvm/Transforms/Utils/SizeOpts.h"
142 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
143 #include <algorithm>
144 #include <cassert>
145 #include <cstdint>
146 #include <cstdlib>
147 #include <functional>
148 #include <iterator>
149 #include <limits>
150 #include <memory>
151 #include <string>
152 #include <tuple>
153 #include <utility>
154 
155 using namespace llvm;
156 
157 #define LV_NAME "loop-vectorize"
158 #define DEBUG_TYPE LV_NAME
159 
160 /// @{
161 /// Metadata attribute names
162 static const char *const LLVMLoopVectorizeFollowupAll =
163     "llvm.loop.vectorize.followup_all";
164 static const char *const LLVMLoopVectorizeFollowupVectorized =
165     "llvm.loop.vectorize.followup_vectorized";
166 static const char *const LLVMLoopVectorizeFollowupEpilogue =
167     "llvm.loop.vectorize.followup_epilogue";
168 /// @}
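
// These are the followup attribute names described in
// docs/TransformMetadata.rst. As a rough, illustrative example, a loop latch
// branch may carry metadata such as:
//
//   br i1 %cond, label %header, label %exit, !llvm.loop !0
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.vectorize.followup_vectorized",
//          !{!"llvm.loop.unroll.disable"}}
//
// in which case the listed attributes are attached to the corresponding loop
// (vectorized, epilogue, or all resulting loops) produced by this pass.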
169 
170 STATISTIC(LoopsVectorized, "Number of loops vectorized");
171 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
172 
173 /// Loops with a known constant trip count below this number are vectorized only
174 /// if no scalar iteration overheads are incurred.
175 static cl::opt<unsigned> TinyTripCountVectorThreshold(
176     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
177     cl::desc("Loops with a constant trip count that is smaller than this "
178              "value are vectorized only if no scalar iteration overheads "
179              "are incurred."));
180 
181 // Indicates that an epilogue is undesired and predication is preferred.
182 // This means that the vectorizer will try to fold the loop-tail (epilogue)
183 // into the loop and predicate the loop body accordingly.
184 static cl::opt<bool> PreferPredicateOverEpilog(
185     "prefer-predicate-over-epilog", cl::init(false), cl::Hidden,
186     cl::desc("Indicate that an epilogue is undesired, predication should be "
187              "used instead."));
188 
189 static cl::opt<bool> MaximizeBandwidth(
190     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
191     cl::desc("Maximize bandwidth when selecting vectorization factor which "
192              "will be determined by the smallest type in loop."));
193 
194 static cl::opt<bool> EnableInterleavedMemAccesses(
195     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
196     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
197 
198 /// An interleave-group may need masking if it resides in a block that needs
199 /// predication, or in order to mask away gaps.
200 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
201     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
202     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
203 
204 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
205     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
206     cl::desc("We don't interleave loops with an estimated constant trip count "
207              "below this number"));
208 
209 static cl::opt<unsigned> ForceTargetNumScalarRegs(
210     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
211     cl::desc("A flag that overrides the target's number of scalar registers."));
212 
213 static cl::opt<unsigned> ForceTargetNumVectorRegs(
214     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
215     cl::desc("A flag that overrides the target's number of vector registers."));
216 
217 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
218     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
219     cl::desc("A flag that overrides the target's max interleave factor for "
220              "scalar loops."));
221 
222 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
223     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
224     cl::desc("A flag that overrides the target's max interleave factor for "
225              "vectorized loops."));
226 
227 static cl::opt<unsigned> ForceTargetInstructionCost(
228     "force-target-instruction-cost", cl::init(0), cl::Hidden,
229     cl::desc("A flag that overrides the target's expected cost for "
230              "an instruction to a single constant value. Mostly "
231              "useful for getting consistent testing."));
232 
233 static cl::opt<unsigned> SmallLoopCost(
234     "small-loop-cost", cl::init(20), cl::Hidden,
235     cl::desc(
236         "The cost of a loop that is considered 'small' by the interleaver."));
237 
238 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
239     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
240     cl::desc("Enable the use of the block frequency analysis to access PGO "
241              "heuristics minimizing code growth in cold regions and being more "
242              "aggressive in hot regions."));
243 
244 // Runtime interleave loops for load/store throughput.
245 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
246     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
247     cl::desc(
248         "Enable runtime interleaving until load/store ports are saturated"));
249 
250 /// The number of stores in a loop that are allowed to need predication.
251 static cl::opt<unsigned> NumberOfStoresToPredicate(
252     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
253     cl::desc("Max number of stores to be predicated behind an if."));
254 
255 static cl::opt<bool> EnableIndVarRegisterHeur(
256     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
257     cl::desc("Count the induction variable only once when interleaving"));
258 
259 static cl::opt<bool> EnableCondStoresVectorization(
260     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
261     cl::desc("Enable if predication of stores during vectorization."));
262 
263 static cl::opt<unsigned> MaxNestedScalarReductionIC(
264     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
265     cl::desc("The maximum interleave count to use when interleaving a scalar "
266              "reduction in a nested loop."));
267 
268 cl::opt<bool> EnableVPlanNativePath(
269     "enable-vplan-native-path", cl::init(false), cl::Hidden,
270     cl::desc("Enable VPlan-native vectorization path with "
271              "support for outer loop vectorization."));
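
// For example, assuming a typical invocation of opt with the legacy pass
// manager, the native path can be exercised with something like:
//   opt -loop-vectorize -enable-vplan-native-path -S input.ll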
272 
273 // FIXME: Remove this switch once we have divergence analysis. Currently we
274 // assume divergent non-backedge branches when this switch is true.
275 cl::opt<bool> EnableVPlanPredication(
276     "enable-vplan-predication", cl::init(false), cl::Hidden,
277     cl::desc("Enable VPlan-native vectorization path predicator with "
278              "support for outer loop vectorization."));
279 
280 // This flag enables the stress testing of the VPlan H-CFG construction in the
281 // VPlan-native vectorization path. It must be used in conjunction with
282 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
283 // verification of the H-CFGs built.
284 static cl::opt<bool> VPlanBuildStressTest(
285     "vplan-build-stress-test", cl::init(false), cl::Hidden,
286     cl::desc(
287         "Build VPlan for every supported loop nest in the function and bail "
288         "out right after the build (stress test the VPlan H-CFG construction "
289         "in the VPlan-native vectorization path)."));
290 
291 cl::opt<bool> llvm::EnableLoopInterleaving(
292     "interleave-loops", cl::init(true), cl::Hidden,
293     cl::desc("Enable loop interleaving in loop vectorization passes"));
294 cl::opt<bool> llvm::EnableLoopVectorization(
295     "vectorize-loops", cl::init(true), cl::Hidden,
296     cl::desc("Run the loop vectorization passes"));
297 
298 /// A helper function for converting scalar types to vector types.
299 /// If the incoming type is void, we return void. If the VF is 1, we return
300 /// the scalar type.
301 static Type *ToVectorTy(Type *Scalar, unsigned VF) {
302   if (Scalar->isVoidTy() || VF == 1)
303     return Scalar;
304   return VectorType::get(Scalar, VF);
305 }
306 
307 /// A helper function that returns the type of loaded or stored value.
308 static Type *getMemInstValueType(Value *I) {
309   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
310          "Expected Load or Store instruction");
311   if (auto *LI = dyn_cast<LoadInst>(I))
312     return LI->getType();
313   return cast<StoreInst>(I)->getValueOperand()->getType();
314 }
315 
316 /// A helper function that returns true if the given type is irregular. The
317 /// type is irregular if its allocated size doesn't equal the store size of an
318 /// element of the corresponding vector type at the given vectorization factor.
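///
/// For example, assuming a typical 64-bit DataLayout, i1 is irregular: a
/// single i1 is allocated in a whole byte, but a <4 x i1> vector stores its
/// elements in 4 bits, so 4 * alloc-size(i1) (4 bytes) differs from
/// store-size(<4 x i1>) (1 byte). A type such as i32 is regular at any VF.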
319 static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
320   // Determine if an array of VF elements of type Ty is "bitcast compatible"
321   // with a <VF x Ty> vector.
322   if (VF > 1) {
323     auto *VectorTy = VectorType::get(Ty, VF);
324     return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
325   }
326 
327   // If the vectorization factor is one, we just check if an array of type Ty
328   // requires padding between elements.
329   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
330 }
331 
332 /// A helper function that returns the reciprocal of the block probability of
333 /// predicated blocks. If we return X, we are assuming the predicated block
334 /// will execute once for every X iterations of the loop header.
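/// Roughly speaking, the cost model divides the scalar cost of an instruction
/// in a predicated block by this value, i.e. it weights the cost by the
/// assumed execution probability of the block.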
335 ///
336 /// TODO: We should use actual block probability here, if available. Currently,
337 ///       we always assume predicated blocks have a 50% chance of executing.
338 static unsigned getReciprocalPredBlockProb() { return 2; }
339 
340 /// A helper function that adds a 'fast' flag to floating-point operations.
341 static Value *addFastMathFlag(Value *V) {
342   if (isa<FPMathOperator>(V))
343     cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
344   return V;
345 }
346 
347 static Value *addFastMathFlag(Value *V, FastMathFlags FMF) {
348   if (isa<FPMathOperator>(V))
349     cast<Instruction>(V)->setFastMathFlags(FMF);
350   return V;
351 }
352 
353 /// A helper function that returns an integer or floating-point constant with
354 /// value C.
355 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
356   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
357                            : ConstantFP::get(Ty, C);
358 }
359 
360 /// Returns "best known" trip count for the specified loop \p L as defined by
361 /// the following procedure:
362 ///   1) Returns exact trip count if it is known.
363 ///   2) Returns expected trip count according to profile data if any.
364 ///   3) Returns upper bound estimate if it is known.
365 ///   4) Returns None if all of the above failed.
366 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
367   // Check if exact trip count is known.
368   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
369     return ExpectedTC;
370 
371   // Check if there is an expected trip count available from profile data.
372   if (LoopVectorizeWithBlockFrequency)
373     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
374       return EstimatedTC;
375 
376   // Check if upper bound estimate is known.
377   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
378     return ExpectedTC;
379 
380   return None;
381 }
382 
383 namespace llvm {
384 
385 /// InnerLoopVectorizer vectorizes loops which contain only one basic
386 /// block to a specified vectorization factor (VF).
387 /// This class performs the widening of scalars into vectors, or multiple
388 /// scalars. This class also implements the following features:
389 /// * It inserts an epilogue loop for handling loops that don't have iteration
390 ///   counts that are known to be a multiple of the vectorization factor.
391 /// * It handles the code generation for reduction variables.
392 /// * Scalarization (implementation using scalars) of un-vectorizable
393 ///   instructions.
394 /// InnerLoopVectorizer does not perform any vectorization-legality
395 /// checks, and relies on the caller to check for the different legality
396 /// aspects. The InnerLoopVectorizer relies on the
397 /// LoopVectorizationLegality class to provide information about the induction
398 /// and reduction variables that were found for a given vectorization factor.
399 class InnerLoopVectorizer {
400 public:
401   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
402                       LoopInfo *LI, DominatorTree *DT,
403                       const TargetLibraryInfo *TLI,
404                       const TargetTransformInfo *TTI, AssumptionCache *AC,
405                       OptimizationRemarkEmitter *ORE, unsigned VecWidth,
406                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
407                       LoopVectorizationCostModel *CM)
408       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
409         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
410         Builder(PSE.getSE()->getContext()),
411         VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}
412   virtual ~InnerLoopVectorizer() = default;
413 
414   /// Create a new empty loop. Unlink the old loop and connect the new one.
415   /// Return the pre-header block of the new loop.
416   BasicBlock *createVectorizedLoopSkeleton();
417 
418   /// Widen a single instruction within the innermost loop.
419   void widenInstruction(Instruction &I);
420 
421   /// Fix the vectorized code, taking care of header phis, live-outs, and more.
422   void fixVectorizedLoop();
423 
424   // Return true if any runtime check is added.
425   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
426 
427   /// A type for vectorized values in the new loop. Each value from the
428   /// original loop, when vectorized, is represented by UF vector values in the
429   /// new unrolled loop, where UF is the unroll factor.
430   using VectorParts = SmallVector<Value *, 2>;
431 
432   /// Vectorize a single GetElementPtrInst based on information gathered and
433   /// decisions taken during planning.
434   void widenGEP(GetElementPtrInst *GEP, unsigned UF, unsigned VF,
435                 bool IsPtrLoopInvariant, SmallBitVector &IsIndexLoopInvariant);
436 
437   /// Vectorize a single PHINode in a block. This method handles the induction
438   /// variable canonicalization. It supports both VF = 1 for unrolled loops and
439   /// arbitrary length vectors.
440   void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);
441 
442   /// A helper function to scalarize a single Instruction in the innermost loop.
443   /// Generates a sequence of scalar instances for each lane between \p MinLane
444   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
445   /// inclusive.
446   void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
447                             bool IfPredicateInstr);
448 
449   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
450   /// is provided, the integer induction variable will first be truncated to
451   /// the corresponding type.
452   void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);
453 
454   /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
455   /// vector or scalar value on-demand if one is not yet available. When
456   /// vectorizing a loop, we visit the definition of an instruction before its
457   /// uses. When visiting the definition, we either vectorize or scalarize the
458   /// instruction, creating an entry for it in the corresponding map. (In some
459   /// cases, such as induction variables, we will create both vector and scalar
460   /// entries.) Then, as we encounter uses of the definition, we derive values
461   /// for each scalar or vector use unless such a value is already available.
462   /// For example, if we scalarize a definition and one of its uses is vector,
463   /// we build the required vector on-demand with an insertelement sequence
464   /// when visiting the use. Otherwise, if the use is scalar, we can use the
465   /// existing scalar definition.
466   ///
467   /// Return a value in the new loop corresponding to \p V from the original
468   /// loop at unroll index \p Part. If the value has already been vectorized,
469   /// the corresponding vector entry in VectorLoopValueMap is returned. If,
470   /// however, the value has a scalar entry in VectorLoopValueMap, we construct
471   /// a new vector value on-demand by inserting the scalar values into a vector
472   /// with an insertelement sequence. If the value has been neither vectorized
473   /// nor scalarized, it must be loop invariant, so we simply broadcast the
474   /// value into a vector.
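  ///
  /// For example (illustrative), if a definition was scalarized into lanes
  /// %s0..%s3 and a later use requires a vector, the vector value is rebuilt
  /// with an insertelement sequence:
  ///   %v0 = insertelement <4 x i32> undef, i32 %s0, i32 0
  ///   %v1 = insertelement <4 x i32> %v0, i32 %s1, i32 1
  ///   %v2 = insertelement <4 x i32> %v1, i32 %s2, i32 2
  ///   %v3 = insertelement <4 x i32> %v2, i32 %s3, i32 3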
475   Value *getOrCreateVectorValue(Value *V, unsigned Part);
476 
477   /// Return a value in the new loop corresponding to \p V from the original
478   /// loop at unroll and vector indices \p Instance. If the value has been
479   /// vectorized but not scalarized, the necessary extractelement instruction
480   /// will be generated.
481   Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);
482 
483   /// Construct the vector value of a scalarized value \p V one lane at a time.
484   void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);
485 
486   /// Try to vectorize the interleaved access group that \p Instr belongs to
487   /// with the base address given in \p Addr, optionally masking the vector
488   /// operations if \p BlockInMask is non-null. Use \p State to translate given
489   /// VPValues to IR values in the vectorized loop.
490   void vectorizeInterleaveGroup(Instruction *Instr, VPTransformState &State,
491                                 VPValue *Addr, VPValue *BlockInMask = nullptr);
492 
493   /// Vectorize Load and Store instructions with the base address given in \p
494   /// Addr, optionally masking the vector operations if \p BlockInMask is
495   /// non-null. Use \p State to translate given VPValues to IR values in the
496   /// vectorized loop.
497   void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
498                                   VPValue *Addr,
499                                   VPValue *BlockInMask = nullptr);
500 
501   /// Set the debug location in the builder using the debug location in
502   /// the instruction.
503   void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
504 
505   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
506   void fixNonInductionPHIs(void);
507 
508 protected:
509   friend class LoopVectorizationPlanner;
510 
511   /// A small list of PHINodes.
512   using PhiVector = SmallVector<PHINode *, 4>;
513 
514   /// A type for scalarized values in the new loop. Each value from the
515   /// original loop, when scalarized, is represented by UF x VF scalar values
516   /// in the new unrolled loop, where UF is the unroll factor and VF is the
517   /// vectorization factor.
518   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
519 
520   /// Set up the values of the IVs correctly when exiting the vector loop.
521   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
522                     Value *CountRoundDown, Value *EndValue,
523                     BasicBlock *MiddleBlock);
524 
525   /// Create a new induction variable inside L.
526   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
527                                    Value *Step, Instruction *DL);
528 
529   /// Handle all cross-iteration phis in the header.
530   void fixCrossIterationPHIs();
531 
532   /// Fix a first-order recurrence. This is the second phase of vectorizing
533   /// this phi node.
534   void fixFirstOrderRecurrence(PHINode *Phi);
535 
536   /// Fix a reduction cross-iteration phi. This is the second phase of
537   /// vectorizing this phi node.
538   void fixReduction(PHINode *Phi);
539 
540   /// Clear NSW/NUW flags from reduction instructions if necessary.
541   void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);
542 
543   /// The loop exit block may have single-value PHI nodes with some
544   /// incoming value. While vectorizing we only handled real values
545   /// that were defined inside the loop, and we should have one value for
546   /// each predecessor of its parent basic block. See PR14725.
547   void fixLCSSAPHIs();
548 
549   /// Iteratively sink the scalarized operands of a predicated instruction into
550   /// the block that was created for it.
551   void sinkScalarOperands(Instruction *PredInst);
552 
553   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
554   /// represented as.
555   void truncateToMinimalBitwidths();
556 
557   /// Create a broadcast instruction. This method generates a broadcast
558   /// instruction (shuffle) for loop invariant values and for the induction
559   /// value. If this is the induction variable then we extend it to N, N+1, ...
560   /// This is needed because each iteration in the loop corresponds to a SIMD
561   /// element.
562   virtual Value *getBroadcastInstrs(Value *V);
563 
564   /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
565   /// to each vector element of Val. The sequence starts at StartIdx.
566   /// \p Opcode is relevant for FP induction variable.
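  /// For example, with Val = <%iv, %iv, %iv, %iv>, StartIdx = 0 and Step = 1,
  /// the result is <%iv, %iv + 1, %iv + 2, %iv + 3>.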
567   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
568                                Instruction::BinaryOps Opcode =
569                                Instruction::BinaryOpsEnd);
570 
571   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
572   /// variable on which to base the steps, \p Step is the size of the step, and
573   /// \p EntryVal is the value from the original loop that maps to the steps.
574   /// Note that \p EntryVal doesn't have to be an induction variable - it
575   /// can also be a truncate instruction.
576   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
577                         const InductionDescriptor &ID);
578 
579   /// Create a vector induction phi node based on an existing scalar one. \p
580   /// EntryVal is the value from the original loop that maps to the vector phi
581   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
582   /// truncate instruction, instead of widening the original IV, we widen a
583   /// version of the IV truncated to \p EntryVal's type.
584   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
585                                        Value *Step, Instruction *EntryVal);
586 
587   /// Returns true if an instruction \p I should be scalarized instead of
588   /// vectorized for the chosen vectorization factor.
589   bool shouldScalarizeInstruction(Instruction *I) const;
590 
591   /// Returns true if we should generate a scalar version of \p IV.
592   bool needsScalarInduction(Instruction *IV) const;
593 
594   /// If there is a cast involved in the induction variable \p ID, which should
595   /// be ignored in the vectorized loop body, this function records the
596   /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
597   /// cast. We had already proved that the casted Phi is equal to the uncasted
598   /// Phi in the vectorized loop (under a runtime guard), and therefore
599   /// there is no need to vectorize the cast - the same value can be used in the
600   /// vector loop for both the Phi and the cast.
601   /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
602   /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
603   ///
604   /// \p EntryVal is the value from the original loop that maps to the vector
605   /// phi node and is used to distinguish what is the IV currently being
606   /// processed - original one (if \p EntryVal is a phi corresponding to the
607   /// original IV) or the "newly-created" one based on the proof mentioned above
608   /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
609   /// latter case \p EntryVal is a TruncInst and we must not record anything for
610   /// that IV, but it's error-prone to expect callers of this routine to care
611   /// about that, hence this explicit parameter.
612   void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
613                                              const Instruction *EntryVal,
614                                              Value *VectorLoopValue,
615                                              unsigned Part,
616                                              unsigned Lane = UINT_MAX);
617 
618   /// Generate a shuffle sequence that will reverse the vector Vec.
619   virtual Value *reverseVector(Value *Vec);
620 
621   /// Returns (and creates if needed) the original loop trip count.
622   Value *getOrCreateTripCount(Loop *NewLoop);
623 
624   /// Returns (and creates if needed) the trip count of the widened loop.
625   Value *getOrCreateVectorTripCount(Loop *NewLoop);
626 
627   /// Returns a bitcasted value to the requested vector type.
628   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
629   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
630                                 const DataLayout &DL);
631 
632   /// Emit a bypass check to see if the vector trip count is zero, including if
633   /// it overflows.
634   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
635 
636   /// Emit a bypass check to see if all of the SCEV assumptions we've
637   /// had to make are correct.
638   void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
639 
640   /// Emit bypass checks to check any memory assumptions we may have made.
641   void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
642 
643   /// Compute the transformed value of Index at offset StartValue using step
644   /// StepValue.
645   /// For integer induction, returns StartValue + Index * StepValue.
646   /// For pointer induction, returns StartValue[Index * StepValue].
647   /// FIXME: The newly created binary instructions should contain nsw/nuw
648   /// flags, which can be found from the original scalar operations.
649   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
650                               const DataLayout &DL,
651                               const InductionDescriptor &ID) const;
652 
653   /// Add additional metadata to \p To that was not present on \p Orig.
654   ///
655   /// Currently this is used to add the noalias annotations based on the
656   /// inserted memchecks.  Use this for instructions that are *cloned* into the
657   /// vector loop.
658   void addNewMetadata(Instruction *To, const Instruction *Orig);
659 
660   /// Add metadata from one instruction to another.
661   ///
662   /// This includes both the original MDs from \p From and additional ones (\see
663   /// addNewMetadata).  Use this for *newly created* instructions in the vector
664   /// loop.
665   void addMetadata(Instruction *To, Instruction *From);
666 
667   /// Similar to the previous function but it adds the metadata to a
668   /// vector of instructions.
669   void addMetadata(ArrayRef<Value *> To, Instruction *From);
670 
671   /// The original loop.
672   Loop *OrigLoop;
673 
674   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
675   /// dynamic knowledge to simplify SCEV expressions and converts them to a
676   /// more usable form.
677   PredicatedScalarEvolution &PSE;
678 
679   /// Loop Info.
680   LoopInfo *LI;
681 
682   /// Dominator Tree.
683   DominatorTree *DT;
684 
685   /// Alias Analysis.
686   AliasAnalysis *AA;
687 
688   /// Target Library Info.
689   const TargetLibraryInfo *TLI;
690 
691   /// Target Transform Info.
692   const TargetTransformInfo *TTI;
693 
694   /// Assumption Cache.
695   AssumptionCache *AC;
696 
697   /// Interface to emit optimization remarks.
698   OptimizationRemarkEmitter *ORE;
699 
700   /// LoopVersioning.  It's only set up (non-null) if memchecks were
701   /// used.
702   ///
703   /// This is currently only used to add no-alias metadata based on the
704   /// memchecks.  The actual versioning is performed manually.
705   std::unique_ptr<LoopVersioning> LVer;
706 
707   /// The vectorization SIMD factor to use. Each vector will have this many
708   /// vector elements.
709   unsigned VF;
710 
711   /// The vectorization unroll factor to use. Each scalar is vectorized to this
712   /// many different vector instructions.
713   unsigned UF;
714 
715   /// The builder that we use.
716   IRBuilder<> Builder;
717 
718   // --- Vectorization state ---
719 
720   /// The vector-loop preheader.
721   BasicBlock *LoopVectorPreHeader;
722 
723   /// The scalar-loop preheader.
724   BasicBlock *LoopScalarPreHeader;
725 
726   /// Middle Block between the vector and the scalar.
727   BasicBlock *LoopMiddleBlock;
728 
729   /// The ExitBlock of the scalar loop.
730   BasicBlock *LoopExitBlock;
731 
732   /// The vector loop body.
733   BasicBlock *LoopVectorBody;
734 
735   /// The scalar loop body.
736   BasicBlock *LoopScalarBody;
737 
738   /// A list of all bypass blocks. The first block is the entry of the loop.
739   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
740 
741   /// The new Induction variable which was added to the new block.
742   PHINode *Induction = nullptr;
743 
744   /// The induction variable of the old basic block.
745   PHINode *OldInduction = nullptr;
746 
747   /// Maps values from the original loop to their corresponding values in the
748   /// vectorized loop. A key value can map to either vector values, scalar
749   /// values or both kinds of values, depending on whether the key was
750   /// vectorized and scalarized.
751   VectorizerValueMap VectorLoopValueMap;
752 
753   /// Store instructions that were predicated.
754   SmallVector<Instruction *, 4> PredicatedInstructions;
755 
756   /// Trip count of the original loop.
757   Value *TripCount = nullptr;
758 
759   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
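  /// For example, TripCount = 103 with VF = 8 and UF = 2 gives
  /// 103 - (103 % 16) = 96.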
760   Value *VectorTripCount = nullptr;
761 
762   /// The legality analysis.
763   LoopVectorizationLegality *Legal;
764 
765   /// The profitability analysis.
766   LoopVectorizationCostModel *Cost;
767 
768   // Record whether runtime checks are added.
769   bool AddedSafetyChecks = false;
770 
771   // Holds the end values for each induction variable. We save the end values
772   // so we can later fix-up the external users of the induction variables.
773   DenseMap<PHINode *, Value *> IVEndValues;
774 
775   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
776   // fixed up at the end of vector code generation.
777   SmallVector<PHINode *, 8> OrigPHIsToFix;
778 };
779 
780 class InnerLoopUnroller : public InnerLoopVectorizer {
781 public:
782   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
783                     LoopInfo *LI, DominatorTree *DT,
784                     const TargetLibraryInfo *TLI,
785                     const TargetTransformInfo *TTI, AssumptionCache *AC,
786                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
787                     LoopVectorizationLegality *LVL,
788                     LoopVectorizationCostModel *CM)
789       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
790                             UnrollFactor, LVL, CM) {}
791 
792 private:
793   Value *getBroadcastInstrs(Value *V) override;
794   Value *getStepVector(Value *Val, int StartIdx, Value *Step,
795                        Instruction::BinaryOps Opcode =
796                        Instruction::BinaryOpsEnd) override;
797   Value *reverseVector(Value *Vec) override;
798 };
799 
800 } // end namespace llvm
801 
802 /// Look for a meaningful debug location on the instruction or its
803 /// operands.
804 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
805   if (!I)
806     return I;
807 
808   DebugLoc Empty;
809   if (I->getDebugLoc() != Empty)
810     return I;
811 
812   for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
813     if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
814       if (OpInst->getDebugLoc() != Empty)
815         return OpInst;
816   }
817 
818   return I;
819 }
820 
821 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
822   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
823     const DILocation *DIL = Inst->getDebugLoc();
824     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
825         !isa<DbgInfoIntrinsic>(Inst)) {
826       auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(UF * VF);
827       if (NewDIL)
828         B.SetCurrentDebugLocation(NewDIL.getValue());
829       else
830         LLVM_DEBUG(dbgs()
831                    << "Failed to create new discriminator: "
832                    << DIL->getFilename() << " Line: " << DIL->getLine());
833     }
834     else
835       B.SetCurrentDebugLocation(DIL);
836   } else
837     B.SetCurrentDebugLocation(DebugLoc());
838 }
839 
840 /// Write a record \p DebugMsg about vectorization failure to the debug
841 /// output stream. If \p I is passed, it is an instruction that prevents
842 /// vectorization.
843 #ifndef NDEBUG
844 static void debugVectorizationFailure(const StringRef DebugMsg,
845     Instruction *I) {
846   dbgs() << "LV: Not vectorizing: " << DebugMsg;
847   if (I != nullptr)
848     dbgs() << " " << *I;
849   else
850     dbgs() << '.';
851   dbgs() << '\n';
852 }
853 #endif
854 
855 /// Create an analysis remark that explains why vectorization failed
856 ///
857 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
858 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
859 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
860 /// the location of the remark.  \return the remark object that can be
861 /// streamed to.
862 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
863     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
864   Value *CodeRegion = TheLoop->getHeader();
865   DebugLoc DL = TheLoop->getStartLoc();
866 
867   if (I) {
868     CodeRegion = I->getParent();
869     // If there is no debug location attached to the instruction, fall back to
870     // using the loop's.
871     if (I->getDebugLoc())
872       DL = I->getDebugLoc();
873   }
874 
875   OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
876   R << "loop not vectorized: ";
877   return R;
878 }
879 
880 namespace llvm {
881 
882 void reportVectorizationFailure(const StringRef DebugMsg,
883     const StringRef OREMsg, const StringRef ORETag,
884     OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) {
885   LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
886   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
887   ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
888                 ORETag, TheLoop, I) << OREMsg);
889 }
890 
891 } // end namespace llvm
892 
893 #ifndef NDEBUG
894 /// \return string containing a file name and a line # for the given loop.
895 static std::string getDebugLocString(const Loop *L) {
896   std::string Result;
897   if (L) {
898     raw_string_ostream OS(Result);
899     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
900       LoopDbgLoc.print(OS);
901     else
902       // Just print the module name.
903       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
904     OS.flush();
905   }
906   return Result;
907 }
908 #endif
909 
910 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
911                                          const Instruction *Orig) {
912   // If the loop was versioned with memchecks, add the corresponding no-alias
913   // metadata.
914   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
915     LVer->annotateInstWithNoAlias(To, Orig);
916 }
917 
918 void InnerLoopVectorizer::addMetadata(Instruction *To,
919                                       Instruction *From) {
920   propagateMetadata(To, From);
921   addNewMetadata(To, From);
922 }
923 
924 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
925                                       Instruction *From) {
926   for (Value *V : To) {
927     if (Instruction *I = dyn_cast<Instruction>(V))
928       addMetadata(I, From);
929   }
930 }
931 
932 namespace llvm {
933 
934 // Hints that tell the loop vectorization cost model how the scalar epilogue
935 // loop should be lowered.
936 enum ScalarEpilogueLowering {
937 
938   // The default: allowing scalar epilogues.
939   CM_ScalarEpilogueAllowed,
940 
941   // Vectorization with OptForSize: don't allow epilogues.
942   CM_ScalarEpilogueNotAllowedOptSize,
943 
944   // A special case of vectorization with OptForSize: loops with a very small
945   // trip count are considered for vectorization under OptForSize, thereby
946   // making sure the cost of their loop body is dominant, free of runtime
947   // guards and scalar iteration overheads.
948   CM_ScalarEpilogueNotAllowedLowTripLoop,
949 
950   // Loop hint predicate indicating an epilogue is undesired.
951   CM_ScalarEpilogueNotNeededUsePredicate
952 };
953 
954 /// LoopVectorizationCostModel - estimates the expected speedups due to
955 /// vectorization.
956 /// In many cases vectorization is not profitable. This can happen because of
957 /// a number of reasons. In this class we mainly attempt to predict the
958 /// expected speedup/slowdowns due to the supported instruction set. We use the
959 /// TargetTransformInfo to query the different backends for the cost of
960 /// different operations.
961 class LoopVectorizationCostModel {
962 public:
963   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
964                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
965                              LoopVectorizationLegality *Legal,
966                              const TargetTransformInfo &TTI,
967                              const TargetLibraryInfo *TLI, DemandedBits *DB,
968                              AssumptionCache *AC,
969                              OptimizationRemarkEmitter *ORE, const Function *F,
970                              const LoopVectorizeHints *Hints,
971                              InterleavedAccessInfo &IAI)
972       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
973         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
974         Hints(Hints), InterleaveInfo(IAI) {}
975 
976   /// \return An upper bound for the vectorization factor, or None if
977   /// vectorization and interleaving should be avoided up front.
978   Optional<unsigned> computeMaxVF();
979 
980   /// \return True if runtime checks are required for vectorization, and false
981   /// otherwise.
982   bool runtimeChecksRequired();
983 
984   /// \return The most profitable vectorization factor and the cost of that VF.
985   /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
986   /// then this vectorization factor will be selected if vectorization is
987   /// possible.
988   VectorizationFactor selectVectorizationFactor(unsigned MaxVF);
989 
990   /// Setup cost-based decisions for user vectorization factor.
991   void selectUserVectorizationFactor(unsigned UserVF) {
992     collectUniformsAndScalars(UserVF);
993     collectInstsToScalarize(UserVF);
994   }
995 
996   /// \return The size (in bits) of the smallest and widest types in the code
997   /// that needs to be vectorized. We ignore values that remain scalar such as
998   /// 64 bit loop indices.
999   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1000 
1001   /// \return The desired interleave count.
1002   /// If interleave count has been specified by metadata it will be returned.
1003   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1004   /// are the selected vectorization factor and the cost of the selected VF.
1005   unsigned selectInterleaveCount(unsigned VF, unsigned LoopCost);
1006 
1007   /// Memory access instruction may be vectorized in more than one way.
1008   /// Form of instruction after vectorization depends on cost.
1009   /// This function takes cost-based decisions for Load/Store instructions
1010   /// and collects them in a map. This decision map is used for building
1011   /// the lists of loop-uniform and loop-scalar instructions.
1012   /// The calculated cost is saved with widening decision in order to
1013   /// avoid redundant calculations.
1014   void setCostBasedWideningDecision(unsigned VF);
1015 
1016   /// A struct that represents some properties of the register usage
1017   /// of a loop.
1018   struct RegisterUsage {
1019     /// Holds the number of loop invariant values that are used in the loop.
1020     /// The key is ClassID of target-provided register class.
1021     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1022     /// Holds the maximum number of concurrent live intervals in the loop.
1023     /// The key is ClassID of target-provided register class.
1024     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1025   };
1026 
1027   /// \return Returns information about the register usages of the loop for the
1028   /// given vectorization factors.
1029   SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1030 
1031   /// Collect values we want to ignore in the cost model.
1032   void collectValuesToIgnore();
1033 
1034   /// \returns The smallest bitwidth each instruction can be represented with.
1035   /// The vector equivalents of these instructions should be truncated to this
1036   /// type.
1037   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1038     return MinBWs;
1039   }
1040 
1041   /// \returns True if it is more profitable to scalarize instruction \p I for
1042   /// vectorization factor \p VF.
1043   bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
1044     assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");
1045 
1046     // Cost model is not run in the VPlan-native path - return conservative
1047     // result until this changes.
1048     if (EnableVPlanNativePath)
1049       return false;
1050 
1051     auto Scalars = InstsToScalarize.find(VF);
1052     assert(Scalars != InstsToScalarize.end() &&
1053            "VF not yet analyzed for scalarization profitability");
1054     return Scalars->second.find(I) != Scalars->second.end();
1055   }
1056 
1057   /// Returns true if \p I is known to be uniform after vectorization.
1058   bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
1059     if (VF == 1)
1060       return true;
1061 
1062     // Cost model is not run in the VPlan-native path - return conservative
1063     // result until this changes.
1064     if (EnableVPlanNativePath)
1065       return false;
1066 
1067     auto UniformsPerVF = Uniforms.find(VF);
1068     assert(UniformsPerVF != Uniforms.end() &&
1069            "VF not yet analyzed for uniformity");
1070     return UniformsPerVF->second.find(I) != UniformsPerVF->second.end();
1071   }
1072 
1073   /// Returns true if \p I is known to be scalar after vectorization.
1074   bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
1075     if (VF == 1)
1076       return true;
1077 
1078     // Cost model is not run in the VPlan-native path - return conservative
1079     // result until this changes.
1080     if (EnableVPlanNativePath)
1081       return false;
1082 
1083     auto ScalarsPerVF = Scalars.find(VF);
1084     assert(ScalarsPerVF != Scalars.end() &&
1085            "Scalar values are not calculated for VF");
1086     return ScalarsPerVF->second.find(I) != ScalarsPerVF->second.end();
1087   }
1088 
1089   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1090   /// for vectorization factor \p VF.
1091   bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
1092     return VF > 1 && MinBWs.find(I) != MinBWs.end() &&
1093            !isProfitableToScalarize(I, VF) &&
1094            !isScalarAfterVectorization(I, VF);
1095   }
1096 
1097   /// Decision that was taken during cost calculation for memory instruction.
1098   enum InstWidening {
1099     CM_Unknown,
1100     CM_Widen,         // For consecutive accesses with stride +1.
1101     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1102     CM_Interleave,
1103     CM_GatherScatter,
1104     CM_Scalarize
1105   };
1106 
1107   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1108   /// instruction \p I and vector width \p VF.
1109   void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
1110                            unsigned Cost) {
1111     assert(VF >= 2 && "Expected VF >=2");
1112     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1113   }
1114 
1115   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1116   /// interleaving group \p Grp and vector width \p VF.
1117   void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF,
1118                            InstWidening W, unsigned Cost) {
1119     assert(VF >= 2 && "Expected VF >=2");
1120     /// Broadcast this decision to all instructions inside the group.
1121     /// But the cost will be assigned to one instruction only.
1122     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1123       if (auto *I = Grp->getMember(i)) {
1124         if (Grp->getInsertPos() == I)
1125           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1126         else
1127           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1128       }
1129     }
1130   }
1131 
1132   /// Return the cost model decision for the given instruction \p I and vector
1133   /// width \p VF. Return CM_Unknown if this instruction did not pass
1134   /// through the cost modeling.
1135   InstWidening getWideningDecision(Instruction *I, unsigned VF) {
1136     assert(VF >= 2 && "Expected VF >=2");
1137 
1138     // Cost model is not run in the VPlan-native path - return conservative
1139     // result until this changes.
1140     if (EnableVPlanNativePath)
1141       return CM_GatherScatter;
1142 
1143     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1144     auto Itr = WideningDecisions.find(InstOnVF);
1145     if (Itr == WideningDecisions.end())
1146       return CM_Unknown;
1147     return Itr->second.first;
1148   }
1149 
1150   /// Return the vectorization cost for the given instruction \p I and vector
1151   /// width \p VF.
1152   unsigned getWideningCost(Instruction *I, unsigned VF) {
1153     assert(VF >= 2 && "Expected VF >=2");
1154     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1155     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1156            "The cost is not calculated");
1157     return WideningDecisions[InstOnVF].second;
1158   }
1159 
1160   /// Return True if instruction \p I is an optimizable truncate whose operand
1161   /// is an induction variable. Such a truncate will be removed by adding a new
1162   /// induction variable with the destination type.
1163   bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
1164     // If the instruction is not a truncate, return false.
1165     auto *Trunc = dyn_cast<TruncInst>(I);
1166     if (!Trunc)
1167       return false;
1168 
1169     // Get the source and destination types of the truncate.
1170     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1171     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1172 
1173     // If the truncate is free for the given types, return false. Replacing a
1174     // free truncate with an induction variable would add an induction variable
1175     // update instruction to each iteration of the loop. We exclude from this
1176     // check the primary induction variable since it will need an update
1177     // instruction regardless.
1178     Value *Op = Trunc->getOperand(0);
1179     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1180       return false;
1181 
1182     // If the truncated value is not an induction variable, return false.
1183     return Legal->isInductionPhi(Op);
1184   }
1185 
1186   /// Collects the instructions to scalarize for each predicated instruction in
1187   /// the loop.
1188   void collectInstsToScalarize(unsigned VF);
1189 
1190   /// Collect Uniform and Scalar values for the given \p VF.
1191   /// The sets depend on CM decision for Load/Store instructions
1192   /// that may be vectorized as interleave, gather-scatter or scalarized.
1193   void collectUniformsAndScalars(unsigned VF) {
1194     // Do the analysis once.
1195     if (VF == 1 || Uniforms.find(VF) != Uniforms.end())
1196       return;
1197     setCostBasedWideningDecision(VF);
1198     collectLoopUniforms(VF);
1199     collectLoopScalars(VF);
1200   }
1201 
1202   /// Returns true if the target machine supports masked store operation
1203   /// for the given \p DataType and kind of access to \p Ptr.
1204   bool isLegalMaskedStore(Type *DataType, Value *Ptr, MaybeAlign Alignment) {
1205     return Legal->isConsecutivePtr(Ptr) &&
1206            TTI.isLegalMaskedStore(DataType, Alignment);
1207   }
1208 
1209   /// Returns true if the target machine supports masked load operation
1210   /// for the given \p DataType and kind of access to \p Ptr.
1211   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, MaybeAlign Alignment) {
1212     return Legal->isConsecutivePtr(Ptr) &&
1213            TTI.isLegalMaskedLoad(DataType, Alignment);
1214   }
1215 
1216   /// Returns true if the target machine supports masked scatter operation
1217   /// for the given \p DataType.
1218   bool isLegalMaskedScatter(Type *DataType, MaybeAlign Alignment) {
1219     return TTI.isLegalMaskedScatter(DataType, Alignment);
1220   }
1221 
1222   /// Returns true if the target machine supports masked gather operation
1223   /// for the given \p DataType.
1224   bool isLegalMaskedGather(Type *DataType, MaybeAlign Alignment) {
1225     return TTI.isLegalMaskedGather(DataType, Alignment);
1226   }
1227 
1228   /// Returns true if the target machine can represent \p V as a masked gather
1229   /// or scatter operation.
1230   bool isLegalGatherOrScatter(Value *V) {
1231     bool LI = isa<LoadInst>(V);
1232     bool SI = isa<StoreInst>(V);
1233     if (!LI && !SI)
1234       return false;
1235     auto *Ty = getMemInstValueType(V);
1236     MaybeAlign Align = getLoadStoreAlignment(V);
1237     return (LI && isLegalMaskedGather(Ty, Align)) ||
1238            (SI && isLegalMaskedScatter(Ty, Align));
1239   }
1240 
1241   /// Returns true if \p I is an instruction that will be scalarized with
1242   /// predication. Such instructions include conditional stores and
1243   /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
1246   bool isScalarWithPredication(Instruction *I, unsigned VF = 1);
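
  // A rough example (illustrative only): in
  //   for (i = 0; i < n; ++i)
  //     if (c[i] != 0) a[i] = b[i] / c[i];
  // the division may trap for masked-off lanes, so it is scalarized and
  // emitted under a per-lane predicate rather than as a single wide division.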
1247 
  /// Returns true if \p I is an instruction that will be predicated either
  /// through scalar predication or a masked load/store or a masked
  /// gather/scatter. This is a superset of the instructions for which
  /// isScalarWithPredication returns true.
1251   bool isPredicatedInst(Instruction *I) {
1252     if (!blockNeedsPredication(I->getParent()))
1253       return false;
1254     // Loads and stores that need some form of masked operation are predicated
1255     // instructions.
1256     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1257       return Legal->isMaskRequired(I);
1258     return isScalarWithPredication(I);
1259   }
1260 
1261   /// Returns true if \p I is a memory instruction with consecutive memory
1262   /// access that can be widened.
1263   bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);
1264 
1265   /// Returns true if \p I is a memory instruction in an interleaved-group
1266   /// of memory accesses that can be vectorized with wide vector loads/stores
1267   /// and shuffles.
1268   bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1);
1269 
1270   /// Check if \p Instr belongs to any interleaved access group.
1271   bool isAccessInterleaved(Instruction *Instr) {
1272     return InterleaveInfo.isInterleaved(Instr);
1273   }
1274 
1275   /// Get the interleaved access group that \p Instr belongs to.
1276   const InterleaveGroup<Instruction> *
1277   getInterleavedAccessGroup(Instruction *Instr) {
1278     return InterleaveInfo.getInterleaveGroup(Instr);
1279   }
1280 
1281   /// Returns true if an interleaved group requires a scalar iteration
1282   /// to handle accesses with gaps, and there is nothing preventing us from
1283   /// creating a scalar epilogue.
1284   bool requiresScalarEpilogue() const {
1285     return isScalarEpilogueAllowed() && InterleaveInfo.requiresScalarEpilogue();
1286   }
1287 
  /// Returns true if a scalar epilogue is allowed, i.e., it has not been
  /// disallowed due to optsize or a loop hint annotation.
1290   bool isScalarEpilogueAllowed() const {
1291     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1292   }
1293 
  /// Returns true if all loop blocks should be masked to fold the tail of the
  /// loop.
1295   bool foldTailByMasking() const { return FoldTailByMasking; }
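
  // For intuition (a sketch, not a definitive description of the generated
  // IR): with a trip count of 10 and VF = 4, folding the tail by masking runs
  // three masked vector iterations; the last iteration uses a mask like
  // <1, 1, 0, 0> so the two out-of-range lanes have no effect.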
1296 
1297   bool blockNeedsPredication(BasicBlock *BB) {
1298     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1299   }
1300 
1301   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1302   /// with factor VF.  Return the cost of the instruction, including
1303   /// scalarization overhead if it's needed.
1304   unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF);
1305 
1306   /// Estimate cost of a call instruction CI if it were vectorized with factor
1307   /// VF. Return the cost of the instruction, including scalarization overhead
1308   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e., either a vector version isn't available or it is too
  /// expensive.
1311   unsigned getVectorCallCost(CallInst *CI, unsigned VF, bool &NeedToScalarize);
1312 
1313 private:
1314   unsigned NumPredStores = 0;
1315 
1316   /// \return An upper bound for the vectorization factor, larger than zero.
1317   /// One is returned if vectorization should best be avoided due to cost.
1318   unsigned computeFeasibleMaxVF(unsigned ConstTripCount);
1319 
1320   /// The vectorization cost is a combination of the cost itself and a boolean
1321   /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
1327   using VectorizationCostTy = std::pair<unsigned, bool>;
1328 
1329   /// Returns the expected execution cost. The unit of the cost does
1330   /// not matter because we use the 'cost' units to compare different
1331   /// vector widths. The cost that is returned is *not* normalized by
1332   /// the factor width.
1333   VectorizationCostTy expectedCost(unsigned VF);
1334 
1335   /// Returns the execution time cost of an instruction for a given vector
1336   /// width. Vector width of one means scalar.
1337   VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
1338 
1339   /// The cost-computation logic from getInstructionCost which provides
1340   /// the vector type as an output parameter.
1341   unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
1342 
1343   /// Calculate vectorization cost of memory instruction \p I.
1344   unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);
1345 
1346   /// The cost computation for scalarized memory instruction.
1347   unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);
1348 
1349   /// The cost computation for interleaving group of memory instructions.
1350   unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);
1351 
1352   /// The cost computation for Gather/Scatter instruction.
1353   unsigned getGatherScatterCost(Instruction *I, unsigned VF);
1354 
1355   /// The cost computation for widening instruction \p I with consecutive
1356   /// memory access.
1357   unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
1358 
  /// The cost calculation for Load/Store instruction \p I with a uniform
  /// pointer:
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop-invariant value stored ? 0 : extract of last
  /// element).
1363   unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
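
  // As a sketch of the intent (illustrative only): for a load from a
  // loop-invariant address, the cost is one scalar load plus a broadcast of
  // the loaded value; for a store to a loop-invariant address, only the last
  // vector lane needs to be stored, so the cost is an extractelement (unless
  // the stored value is loop-invariant) plus one scalar store.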
1364 
1365   /// Estimate the overhead of scalarizing an instruction. This is a
1366   /// convenience wrapper for the type-based getScalarizationOverhead API.
1367   unsigned getScalarizationOverhead(Instruction *I, unsigned VF);
1368 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
1371   bool isConsecutiveLoadOrStore(Instruction *I);
1372 
1373   /// Returns true if an artificially high cost for emulated masked memrefs
1374   /// should be used.
1375   bool useEmulatedMaskMemRefHack(Instruction *I);
1376 
1377   /// Map of scalar integer values to the smallest bitwidth they can be legally
1378   /// represented as. The vector equivalents of these values should be truncated
1379   /// to this type.
1380   MapVector<Instruction *, uint64_t> MinBWs;
1381 
1382   /// A type representing the costs for instructions if they were to be
1383   /// scalarized rather than vectorized. The entries are Instruction-Cost
1384   /// pairs.
1385   using ScalarCostsTy = DenseMap<Instruction *, unsigned>;
1386 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1389   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1390 
1391   /// Records whether it is allowed to have the original scalar loop execute at
1392   /// least once. This may be needed as a fallback loop in case runtime
1393   /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or isn't divisible by the VF,
1395   /// or as a peel-loop to handle gaps in interleave-groups.
1396   /// Under optsize and when the trip count is very small we don't allow any
1397   /// iterations to execute in the scalar loop.
1398   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1399 
  /// All blocks of the loop are to be masked to fold the tail of the scalar
  /// iterations.
1401   bool FoldTailByMasking = false;
1402 
1403   /// A map holding scalar costs for different vectorization factors. The
1404   /// presence of a cost for an instruction in the mapping indicates that the
1405   /// instruction will be scalarized when vectorizing with the associated
1406   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1407   DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;
1408 
1409   /// Holds the instructions known to be uniform after vectorization.
1410   /// The data is collected per VF.
1411   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;
1412 
1413   /// Holds the instructions known to be scalar after vectorization.
1414   /// The data is collected per VF.
1415   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;
1416 
1417   /// Holds the instructions (address computations) that are forced to be
1418   /// scalarized.
1419   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1420 
1421   /// Returns the expected difference in cost from scalarizing the expression
1422   /// feeding a predicated instruction \p PredInst. The instructions to
1423   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1424   /// non-negative return value implies the expression will be scalarized.
1425   /// Currently, only single-use chains are considered for scalarization.
1426   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1427                               unsigned VF);
1428 
1429   /// Collect the instructions that are uniform after vectorization. An
1430   /// instruction is uniform if we represent it with a single scalar value in
1431   /// the vectorized loop corresponding to each vector iteration. Examples of
1432   /// uniform instructions include pointer operands of consecutive or
1433   /// interleaved memory accesses. Note that although uniformity implies an
1434   /// instruction will be scalar, the reverse is not true. In general, a
1435   /// scalarized instruction will be represented by VF scalar values in the
1436   /// vectorized loop, each corresponding to an iteration of the original
1437   /// scalar loop.
1438   void collectLoopUniforms(unsigned VF);
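
  // For example (a sketch): the pointer operand of a consecutive load such as
  //   %p = getelementptr i32, i32* %base, i64 %i
  //   %v = load i32, i32* %p
  // is uniform - one scalar GEP per vector iteration feeds the wide load -
  // whereas a scalarized load would need VF scalar copies of both the GEP and
  // the load.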
1439 
1440   /// Collect the instructions that are scalar after vectorization. An
1441   /// instruction is scalar if it is known to be uniform or will be scalarized
1442   /// during vectorization. Non-uniform scalarized instructions will be
1443   /// represented by VF values in the vectorized loop, each corresponding to an
1444   /// iteration of the original scalar loop.
1445   void collectLoopScalars(unsigned VF);
1446 
1447   /// Keeps cost model vectorization decision and cost for instructions.
1448   /// Right now it is used for memory instructions only.
1449   using DecisionList = DenseMap<std::pair<Instruction *, unsigned>,
1450                                 std::pair<InstWidening, unsigned>>;
1451 
1452   DecisionList WideningDecisions;
1453 
1454   /// Returns true if \p V is expected to be vectorized and it needs to be
1455   /// extracted.
1456   bool needsExtract(Value *V, unsigned VF) const {
1457     Instruction *I = dyn_cast<Instruction>(V);
1458     if (VF == 1 || !I || !TheLoop->contains(I) || TheLoop->isLoopInvariant(I))
1459       return false;
1460 
1461     // Assume we can vectorize V (and hence we need extraction) if the
    // scalars are not computed yet. This can happen because it is called
1463     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1464     // the scalars are collected. That should be a safe assumption in most
1465     // cases, because we check if the operands have vectorizable types
1466     // beforehand in LoopVectorizationLegality.
1467     return Scalars.find(VF) == Scalars.end() ||
1468            !isScalarAfterVectorization(I, VF);
1469   };
1470 
1471   /// Returns a range containing only operands needing to be extracted.
1472   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1473                                                    unsigned VF) {
1474     return SmallVector<Value *, 4>(make_filter_range(
1475         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1476   }
1477 
1478 public:
1479   /// The loop that we evaluate.
1480   Loop *TheLoop;
1481 
1482   /// Predicated scalar evolution analysis.
1483   PredicatedScalarEvolution &PSE;
1484 
1485   /// Loop Info analysis.
1486   LoopInfo *LI;
1487 
1488   /// Vectorization legality.
1489   LoopVectorizationLegality *Legal;
1490 
1491   /// Vector target information.
1492   const TargetTransformInfo &TTI;
1493 
1494   /// Target Library Info.
1495   const TargetLibraryInfo *TLI;
1496 
1497   /// Demanded bits analysis.
1498   DemandedBits *DB;
1499 
1500   /// Assumption cache.
1501   AssumptionCache *AC;
1502 
1503   /// Interface to emit optimization remarks.
1504   OptimizationRemarkEmitter *ORE;
1505 
1506   const Function *TheFunction;
1507 
1508   /// Loop Vectorize Hint.
1509   const LoopVectorizeHints *Hints;
1510 
1511   /// The interleave access information contains groups of interleaved accesses
  /// with the same stride that are close to each other.
1513   InterleavedAccessInfo &InterleaveInfo;
1514 
1515   /// Values to ignore in the cost model.
1516   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1517 
1518   /// Values to ignore in the cost model when VF > 1.
1519   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1520 };
1521 
1522 } // end namespace llvm
1523 
1524 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
1525 // vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If the
1527 // vector length information is not provided, vectorization is not considered
1528 // explicit. Interleave hints are not allowed either. These limitations will be
1529 // relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
1531 // vectorize' semantics. This pragma provides *auto-vectorization hints*
1532 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1533 // provides *explicit vectorization hints* (LV can bypass legal checks and
1534 // assume that vectorization is legal). However, both hints are implemented
1535 // using the same metadata (llvm.loop.vectorize, processed by
1536 // LoopVectorizeHints). This will be fixed in the future when the native IR
1537 // representation for pragma 'omp simd' is introduced.
1538 static bool isExplicitVecOuterLoop(Loop *OuterLp,
1539                                    OptimizationRemarkEmitter *ORE) {
1540   assert(!OuterLp->empty() && "This is not an outer loop");
1541   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1542 
1543   // Only outer loops with an explicit vectorization hint are supported.
1544   // Unannotated outer loops are ignored.
1545   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1546     return false;
1547 
1548   Function *Fn = OuterLp->getHeader()->getParent();
1549   if (!Hints.allowVectorization(Fn, OuterLp,
1550                                 true /*VectorizeOnlyWhenForced*/)) {
1551     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1552     return false;
1553   }
1554 
1555   if (Hints.getInterleave() > 1) {
1556     // TODO: Interleave support is future work.
1557     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1558                          "outer loops.\n");
1559     Hints.emitRemarkWithHints();
1560     return false;
1561   }
1562 
1563   return true;
1564 }
1565 
1566 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
1567                                   OptimizationRemarkEmitter *ORE,
1568                                   SmallVectorImpl<Loop *> &V) {
1569   // Collect inner loops and outer loops without irreducible control flow. For
1570   // now, only collect outer loops that have explicit vectorization hints. If we
1571   // are stress testing the VPlan H-CFG construction, we collect the outermost
1572   // loop of every loop nest.
1573   if (L.empty() || VPlanBuildStressTest ||
1574       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
1575     LoopBlocksRPO RPOT(&L);
1576     RPOT.perform(LI);
1577     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
1578       V.push_back(&L);
1579       // TODO: Collect inner loops inside marked outer loops in case
1580       // vectorization fails for the outer loop. Do not invoke
1581       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1582       // already known to be reducible. We can use an inherited attribute for
1583       // that.
1584       return;
1585     }
1586   }
1587   for (Loop *InnerL : L)
1588     collectSupportedLoops(*InnerL, LI, ORE, V);
1589 }
1590 
1591 namespace {
1592 
1593 /// The LoopVectorize Pass.
1594 struct LoopVectorize : public FunctionPass {
1595   /// Pass identification, replacement for typeid
1596   static char ID;
1597 
1598   LoopVectorizePass Impl;
1599 
1600   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
1601                          bool VectorizeOnlyWhenForced = false)
1602       : FunctionPass(ID) {
1603     Impl.InterleaveOnlyWhenForced = InterleaveOnlyWhenForced;
1604     Impl.VectorizeOnlyWhenForced = VectorizeOnlyWhenForced;
1605     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
1606   }
1607 
1608   bool runOnFunction(Function &F) override {
1609     if (skipFunction(F))
1610       return false;
1611 
1612     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1613     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1614     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1615     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1616     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
1617     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
1618     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
1619     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1620     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1621     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
1622     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
1623     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
1624     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
1625 
1626     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
1627         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
1628 
1629     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
1630                         GetLAA, *ORE, PSI);
1631   }
1632 
1633   void getAnalysisUsage(AnalysisUsage &AU) const override {
1634     AU.addRequired<AssumptionCacheTracker>();
1635     AU.addRequired<BlockFrequencyInfoWrapperPass>();
1636     AU.addRequired<DominatorTreeWrapperPass>();
1637     AU.addRequired<LoopInfoWrapperPass>();
1638     AU.addRequired<ScalarEvolutionWrapperPass>();
1639     AU.addRequired<TargetTransformInfoWrapperPass>();
1640     AU.addRequired<AAResultsWrapperPass>();
1641     AU.addRequired<LoopAccessLegacyAnalysis>();
1642     AU.addRequired<DemandedBitsWrapperPass>();
1643     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
1644     AU.addRequired<InjectTLIMappingsLegacy>();
1645 
1646     // We currently do not preserve loopinfo/dominator analyses with outer loop
1647     // vectorization. Until this is addressed, mark these analyses as preserved
1648     // only for non-VPlan-native path.
1649     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
1650     if (!EnableVPlanNativePath) {
1651       AU.addPreserved<LoopInfoWrapperPass>();
1652       AU.addPreserved<DominatorTreeWrapperPass>();
1653     }
1654 
1655     AU.addPreserved<BasicAAWrapperPass>();
1656     AU.addPreserved<GlobalsAAWrapperPass>();
1657     AU.addRequired<ProfileSummaryInfoWrapperPass>();
1658   }
1659 };
1660 
1661 } // end anonymous namespace
1662 
1663 //===----------------------------------------------------------------------===//
1664 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
1665 // LoopVectorizationCostModel and LoopVectorizationPlanner.
1666 //===----------------------------------------------------------------------===//
1667 
1668 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
1669   // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
1672   Instruction *Instr = dyn_cast<Instruction>(V);
1673   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
1674                      (!Instr ||
1675                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
1676   // Place the code for broadcasting invariant variables in the new preheader.
1677   IRBuilder<>::InsertPointGuard Guard(Builder);
1678   if (SafeToHoist)
1679     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1680 
1681   // Broadcast the scalar into all locations in the vector.
1682   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
1683 
1684   return Shuf;
1685 }
1686 
1687 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
1688     const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
1689   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1690          "Expected either an induction phi-node or a truncate of it!");
1691   Value *Start = II.getStartValue();
1692 
  // Construct the initial value of the vector IV in the vector loop preheader.
1694   auto CurrIP = Builder.saveIP();
1695   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1696   if (isa<TruncInst>(EntryVal)) {
1697     assert(Start->getType()->isIntegerTy() &&
1698            "Truncation requires an integer type");
1699     auto *TruncType = cast<IntegerType>(EntryVal->getType());
1700     Step = Builder.CreateTrunc(Step, TruncType);
1701     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
1702   }
1703   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
1704   Value *SteppedStart =
1705       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
1706 
1707   // We create vector phi nodes for both integer and floating-point induction
1708   // variables. Here, we determine the kind of arithmetic we will perform.
1709   Instruction::BinaryOps AddOp;
1710   Instruction::BinaryOps MulOp;
1711   if (Step->getType()->isIntegerTy()) {
1712     AddOp = Instruction::Add;
1713     MulOp = Instruction::Mul;
1714   } else {
1715     AddOp = II.getInductionOpcode();
1716     MulOp = Instruction::FMul;
1717   }
1718 
1719   // Multiply the vectorization factor by the step using integer or
1720   // floating-point arithmetic as appropriate.
1721   Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF);
1722   Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
1723 
1724   // Create a vector splat to use in the induction update.
1725   //
1726   // FIXME: If the step is non-constant, we create the vector splat with
1727   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
1728   //        handle a constant vector splat.
1729   Value *SplatVF = isa<Constant>(Mul)
1730                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
1731                        : Builder.CreateVectorSplat(VF, Mul);
1732   Builder.restoreIP(CurrIP);
1733 
1734   // We may need to add the step a number of times, depending on the unroll
1735   // factor. The last of those goes into the PHI.
1736   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
1737                                     &*LoopVectorBody->getFirstInsertionPt());
1738   VecInd->setDebugLoc(EntryVal->getDebugLoc());
1739   Instruction *LastInduction = VecInd;
1740   for (unsigned Part = 0; Part < UF; ++Part) {
1741     VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);
1742 
1743     if (isa<TruncInst>(EntryVal))
1744       addMetadata(LastInduction, EntryVal);
1745     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part);
1746 
1747     LastInduction = cast<Instruction>(addFastMathFlag(
1748         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
1749     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
1750   }
1751 
1752   // Move the last step to the end of the latch block. This ensures consistent
1753   // placement of all induction updates.
1754   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
1755   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
1756   auto *ICmp = cast<Instruction>(Br->getCondition());
1757   LastInduction->moveBefore(ICmp);
1758   LastInduction->setName("vec.ind.next");
1759 
1760   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
1761   VecInd->addIncoming(LastInduction, LoopVectorLatch);
1762 }
1763 
1764 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
1765   return Cost->isScalarAfterVectorization(I, VF) ||
1766          Cost->isProfitableToScalarize(I, VF);
1767 }
1768 
1769 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
1770   if (shouldScalarizeInstruction(IV))
1771     return true;
1772   auto isScalarInst = [&](User *U) -> bool {
1773     auto *I = cast<Instruction>(U);
1774     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
1775   };
1776   return llvm::any_of(IV->users(), isScalarInst);
1777 }
1778 
1779 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
1780     const InductionDescriptor &ID, const Instruction *EntryVal,
1781     Value *VectorLoopVal, unsigned Part, unsigned Lane) {
1782   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1783          "Expected either an induction phi-node or a truncate of it!");
1784 
1785   // This induction variable is not the phi from the original loop but the
  // newly-created IV based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
  // re-uses the same InductionDescriptor that the original IV uses, but we
  // don't have to do any recording in this case - that is done when the
  // original IV is processed.
1791   if (isa<TruncInst>(EntryVal))
1792     return;
1793 
1794   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
1795   if (Casts.empty())
1796     return;
1797   // Only the first Cast instruction in the Casts vector is of interest.
  // The rest of the Casts (if they exist) have no uses outside the
1799   // induction update chain itself.
1800   Instruction *CastInst = *Casts.begin();
1801   if (Lane < UINT_MAX)
1802     VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal);
1803   else
1804     VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal);
1805 }
1806 
1807 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
1808   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
1809          "Primary induction variable must have an integer type");
1810 
1811   auto II = Legal->getInductionVars()->find(IV);
1812   assert(II != Legal->getInductionVars()->end() && "IV is not an induction");
1813 
1814   auto ID = II->second;
1815   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
1816 
1817   // The scalar value to broadcast. This will be derived from the canonical
1818   // induction variable.
1819   Value *ScalarIV = nullptr;
1820 
1821   // The value from the original loop to which we are mapping the new induction
1822   // variable.
1823   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
1824 
1825   // True if we have vectorized the induction variable.
1826   auto VectorizedIV = false;
1827 
1828   // Determine if we want a scalar version of the induction variable. This is
1829   // true if the induction variable itself is not widened, or if it has at
1830   // least one user in the loop that is not widened.
1831   auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal);
1832 
1833   // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
1835   assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) &&
1836          "Induction step should be loop invariant");
1837   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
1838   Value *Step = nullptr;
1839   if (PSE.getSE()->isSCEVable(IV->getType())) {
1840     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
1841     Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(),
1842                              LoopVectorPreHeader->getTerminator());
1843   } else {
1844     Step = cast<SCEVUnknown>(ID.getStep())->getValue();
1845   }
1846 
1847   // Try to create a new independent vector induction variable. If we can't
1848   // create the phi node, we will splat the scalar induction variable in each
1849   // loop iteration.
1850   if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) {
1851     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
1852     VectorizedIV = true;
1853   }
1854 
1855   // If we haven't yet vectorized the induction variable, or if we will create
1856   // a scalar one, we need to define the scalar induction variable and step
1857   // values. If we were given a truncation type, truncate the canonical
1858   // induction variable and step. Otherwise, derive these values from the
1859   // induction descriptor.
1860   if (!VectorizedIV || NeedsScalarIV) {
1861     ScalarIV = Induction;
1862     if (IV != OldInduction) {
1863       ScalarIV = IV->getType()->isIntegerTy()
1864                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
1865                      : Builder.CreateCast(Instruction::SIToFP, Induction,
1866                                           IV->getType());
1867       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
1868       ScalarIV->setName("offset.idx");
1869     }
1870     if (Trunc) {
1871       auto *TruncType = cast<IntegerType>(Trunc->getType());
1872       assert(Step->getType()->isIntegerTy() &&
1873              "Truncation requires an integer step");
1874       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
1875       Step = Builder.CreateTrunc(Step, TruncType);
1876     }
1877   }
1878 
1879   // If we haven't yet vectorized the induction variable, splat the scalar
1880   // induction variable, and build the necessary step vectors.
1881   // TODO: Don't do it unless the vectorized IV is really required.
1882   if (!VectorizedIV) {
1883     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
1884     for (unsigned Part = 0; Part < UF; ++Part) {
1885       Value *EntryPart =
1886           getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode());
1887       VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart);
1888       if (Trunc)
1889         addMetadata(EntryPart, Trunc);
1890       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part);
1891     }
1892   }
1893 
1894   // If an induction variable is only used for counting loop iterations or
1895   // calculating addresses, it doesn't need to be widened. Create scalar steps
1896   // that can be used by instructions we will later scalarize. Note that the
1897   // addition of the scalar steps will not increase the number of instructions
1898   // in the loop in the common case prior to InstCombine. We will be trading
1899   // one vector extract for each scalar step.
1900   if (NeedsScalarIV)
1901     buildScalarSteps(ScalarIV, Step, EntryVal, ID);
1902 }
1903 
1904 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
1905                                           Instruction::BinaryOps BinOp) {
1906   // Create and check the types.
1907   assert(Val->getType()->isVectorTy() && "Must be a vector");
1908   int VLen = Val->getType()->getVectorNumElements();
1909 
1910   Type *STy = Val->getType()->getScalarType();
1911   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
1912          "Induction Step must be an integer or FP");
1913   assert(Step->getType() == STy && "Step has wrong type");
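
  // Illustrative sketch of what follows (not from the original source): with
  // VLen = 4, StartIdx = 8 and an integer Step of 2, Cv becomes
  // <8, 9, 10, 11>, Step is splatted to <2, 2, 2, 2>, and the result is
  // Val + <16, 18, 20, 22>.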
1914 
1915   SmallVector<Constant *, 8> Indices;
1916 
1917   if (STy->isIntegerTy()) {
    // Create a vector of consecutive numbers starting at StartIdx.
1919     for (int i = 0; i < VLen; ++i)
1920       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
1921 
1922     // Add the consecutive indices to the vector value.
1923     Constant *Cv = ConstantVector::get(Indices);
1924     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
1925     Step = Builder.CreateVectorSplat(VLen, Step);
1926     assert(Step->getType() == Val->getType() && "Invalid step vec");
1927     // FIXME: The newly created binary instructions should contain nsw/nuw flags,
1928     // which can be found from the original scalar operations.
1929     Step = Builder.CreateMul(Cv, Step);
1930     return Builder.CreateAdd(Val, Step, "induction");
1931   }
1932 
1933   // Floating point induction.
1934   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
1935          "Binary Opcode should be specified for FP induction");
  // Create a vector of consecutive FP numbers starting at StartIdx.
1937   for (int i = 0; i < VLen; ++i)
1938     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
1939 
1940   // Add the consecutive indices to the vector value.
1941   Constant *Cv = ConstantVector::get(Indices);
1942 
1943   Step = Builder.CreateVectorSplat(VLen, Step);
1944 
1945   // Floating point operations had to be 'fast' to enable the induction.
1946   FastMathFlags Flags;
1947   Flags.setFast();
1948 
1949   Value *MulOp = Builder.CreateFMul(Cv, Step);
1950   if (isa<Instruction>(MulOp))
1951     // Have to check, MulOp may be a constant
1952     cast<Instruction>(MulOp)->setFastMathFlags(Flags);
1953 
1954   Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
1955   if (isa<Instruction>(BOp))
1956     cast<Instruction>(BOp)->setFastMathFlags(Flags);
1957   return BOp;
1958 }
1959 
1960 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
1961                                            Instruction *EntryVal,
1962                                            const InductionDescriptor &ID) {
1963   // We shouldn't have to build scalar steps if we aren't vectorizing.
1964   assert(VF > 1 && "VF should be greater than one");
1965 
1966   // Get the value type and ensure it and the step have the same integer type.
1967   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
1968   assert(ScalarIVTy == Step->getType() &&
1969          "Val and Step should have the same type");
1970 
1971   // We build scalar steps for both integer and floating-point induction
1972   // variables. Here, we determine the kind of arithmetic we will perform.
1973   Instruction::BinaryOps AddOp;
1974   Instruction::BinaryOps MulOp;
1975   if (ScalarIVTy->isIntegerTy()) {
1976     AddOp = Instruction::Add;
1977     MulOp = Instruction::Mul;
1978   } else {
1979     AddOp = ID.getInductionOpcode();
1980     MulOp = Instruction::FMul;
1981   }
1982 
1983   // Determine the number of scalars we need to generate for each unroll
1984   // iteration. If EntryVal is uniform, we only need to generate the first
1985   // lane. Otherwise, we generate all VF values.
1986   unsigned Lanes =
1987       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1
1988                                                                          : VF;
1989   // Compute the scalar steps and save the results in VectorLoopValueMap.
1990   for (unsigned Part = 0; Part < UF; ++Part) {
1991     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
1992       auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
1993       auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
1994       auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
1995       VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add);
1996       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane);
1997     }
1998   }
1999 }
2000 
2001 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
2002   assert(V != Induction && "The new induction variable should not be used.");
2003   assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2004   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2005 
  // If we have a stride that is being replaced by one, replace it here. Defer
  // this for the VPlan-native path until we start running Legal checks in
  // that path.
2008   if (!EnableVPlanNativePath && Legal->hasStride(V))
2009     V = ConstantInt::get(V->getType(), 1);
2010 
2011   // If we have a vector mapped to this value, return it.
2012   if (VectorLoopValueMap.hasVectorValue(V, Part))
2013     return VectorLoopValueMap.getVectorValue(V, Part);
2014 
2015   // If the value has not been vectorized, check if it has been scalarized
2016   // instead. If it has been scalarized, and we actually need the value in
2017   // vector form, we will construct the vector values on demand.
2018   if (VectorLoopValueMap.hasAnyScalarValue(V)) {
2019     Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0});
2020 
2021     // If we've scalarized a value, that value should be an instruction.
2022     auto *I = cast<Instruction>(V);
2023 
2024     // If we aren't vectorizing, we can just copy the scalar map values over to
2025     // the vector map.
2026     if (VF == 1) {
2027       VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
2028       return ScalarValue;
2029     }
2030 
2031     // Get the last scalar instruction we generated for V and Part. If the value
2032     // is known to be uniform after vectorization, this corresponds to lane zero
2033     // of the Part unroll iteration. Otherwise, the last instruction is the one
2034     // we created for the last vector lane of the Part unroll iteration.
2035     unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
2036     auto *LastInst = cast<Instruction>(
2037         VectorLoopValueMap.getScalarValue(V, {Part, LastLane}));
2038 
2039     // Set the insert point after the last scalarized instruction. This ensures
2040     // the insertelement sequence will directly follow the scalar definitions.
2041     auto OldIP = Builder.saveIP();
2042     auto NewIP = std::next(BasicBlock::iterator(LastInst));
2043     Builder.SetInsertPoint(&*NewIP);
2044 
2045     // However, if we are vectorizing, we need to construct the vector values.
2046     // If the value is known to be uniform after vectorization, we can just
2047     // broadcast the scalar value corresponding to lane zero for each unroll
2048     // iteration. Otherwise, we construct the vector values using insertelement
2049     // instructions. Since the resulting vectors are stored in
2050     // VectorLoopValueMap, we will only generate the insertelements once.
2051     Value *VectorValue = nullptr;
2052     if (Cost->isUniformAfterVectorization(I, VF)) {
2053       VectorValue = getBroadcastInstrs(ScalarValue);
2054       VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
2055     } else {
2056       // Initialize packing with insertelements to start from undef.
2057       Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF));
2058       VectorLoopValueMap.setVectorValue(V, Part, Undef);
2059       for (unsigned Lane = 0; Lane < VF; ++Lane)
2060         packScalarIntoVectorValue(V, {Part, Lane});
2061       VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
2062     }
2063     Builder.restoreIP(OldIP);
2064     return VectorValue;
2065   }
2066 
2067   // If this scalar is unknown, assume that it is a constant or that it is
2068   // loop invariant. Broadcast V and save the value for future uses.
2069   Value *B = getBroadcastInstrs(V);
2070   VectorLoopValueMap.setVectorValue(V, Part, B);
2071   return B;
2072 }
2073 
2074 Value *
2075 InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
2076                                             const VPIteration &Instance) {
2077   // If the value is not an instruction contained in the loop, it should
2078   // already be scalar.
2079   if (OrigLoop->isLoopInvariant(V))
2080     return V;
2081 
2082   assert(Instance.Lane > 0
2083              ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)
2084              : true && "Uniform values only have lane zero");
2085 
2086   // If the value from the original loop has not been vectorized, it is
2087   // represented by UF x VF scalar values in the new loop. Return the requested
2088   // scalar value.
2089   if (VectorLoopValueMap.hasScalarValue(V, Instance))
2090     return VectorLoopValueMap.getScalarValue(V, Instance);
2091 
2092   // If the value has not been scalarized, get its entry in VectorLoopValueMap
2093   // for the given unroll part. If this entry is not a vector type (i.e., the
2094   // vectorization factor is one), there is no need to generate an
2095   // extractelement instruction.
2096   auto *U = getOrCreateVectorValue(V, Instance.Part);
2097   if (!U->getType()->isVectorTy()) {
2098     assert(VF == 1 && "Value not scalarized has non-vector type");
2099     return U;
2100   }
2101 
2102   // Otherwise, the value from the original loop has been vectorized and is
2103   // represented by UF vector values. Extract and return the requested scalar
2104   // value from the appropriate vector lane.
2105   return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
2106 }
2107 
2108 void InnerLoopVectorizer::packScalarIntoVectorValue(
2109     Value *V, const VPIteration &Instance) {
2110   assert(V != Induction && "The new induction variable should not be used.");
2111   assert(!V->getType()->isVectorTy() && "Can't pack a vector");
2112   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2113 
2114   Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
2115   Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
2116   VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
2117                                             Builder.getInt32(Instance.Lane));
2118   VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
2119 }
2120 
2121 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2122   assert(Vec->getType()->isVectorTy() && "Invalid type");
2123   SmallVector<Constant *, 8> ShuffleMask;
2124   for (unsigned i = 0; i < VF; ++i)
2125     ShuffleMask.push_back(Builder.getInt32(VF - i - 1));
2126 
2127   return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
2128                                      ConstantVector::get(ShuffleMask),
2129                                      "reverse");
2130 }
2131 
2132 // Return whether we allow using masked interleave-groups (for dealing with
2133 // strided loads/stores that reside in predicated blocks, or for dealing
2134 // with gaps).
2135 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2136   // If an override option has been passed in for interleaved accesses, use it.
2137   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2138     return EnableMaskedInterleavedMemAccesses;
2139 
2140   return TTI.enableMaskedInterleavedAccessVectorization();
2141 }
2142 
2143 // Try to vectorize the interleave group that \p Instr belongs to.
2144 //
2145 // E.g. Translate following interleaved load group (factor = 3):
2146 //   for (i = 0; i < N; i+=3) {
2147 //     R = Pic[i];             // Member of index 0
2148 //     G = Pic[i+1];           // Member of index 1
2149 //     B = Pic[i+2];           // Member of index 2
2150 //     ... // do something to R, G, B
2151 //   }
2152 // To:
2153 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2154 //   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
2155 //   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
2156 //   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
2157 //
2158 // Or translate following interleaved store group (factor = 3):
2159 //   for (i = 0; i < N; i+=3) {
2160 //     ... do something to R, G, B
2161 //     Pic[i]   = R;           // Member of index 0
2162 //     Pic[i+1] = G;           // Member of index 1
2163 //     Pic[i+2] = B;           // Member of index 2
2164 //   }
2165 // To:
2166 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2167 //   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2168 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2169 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2170 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2171 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr,
2172                                                    VPTransformState &State,
2173                                                    VPValue *Addr,
2174                                                    VPValue *BlockInMask) {
2175   const InterleaveGroup<Instruction> *Group =
2176       Cost->getInterleavedAccessGroup(Instr);
  assert(Group && "Failed to get an interleaved access group.");
2178 
2179   // Skip if current instruction is not the insert position.
2180   if (Instr != Group->getInsertPos())
2181     return;
2182 
2183   const DataLayout &DL = Instr->getModule()->getDataLayout();
2184 
2185   // Prepare for the vector type of the interleaved load/store.
2186   Type *ScalarTy = getMemInstValueType(Instr);
2187   unsigned InterleaveFactor = Group->getFactor();
2188   Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
2189 
2190   // Prepare for the new pointers.
2191   SmallVector<Value *, 2> AddrParts;
2192   unsigned Index = Group->getIndex(Instr);
2193 
2194   // TODO: extend the masked interleaved-group support to reversed access.
2195   assert((!BlockInMask || !Group->isReverse()) &&
2196          "Reversed masked interleave-group not supported.");
2197 
2198   // If the group is reverse, adjust the index to refer to the last vector lane
2199   // instead of the first. We adjust the index from the first vector lane,
2200   // rather than directly getting the pointer for lane VF - 1, because the
2201   // pointer operand of the interleaved access is supposed to be uniform. For
2202   // uniform instructions, we're only required to generate a value for the
2203   // first vector lane in each unroll iteration.
2204   if (Group->isReverse())
2205     Index += (VF - 1) * Group->getFactor();
2206 
2207   for (unsigned Part = 0; Part < UF; Part++) {
2208     Value *AddrPart = State.get(Addr, {Part, 0});
2209     setDebugLocFromInst(Builder, AddrPart);
2210 
    // Notice that the current instruction could be at any index. We need to
    // adjust the address to that of the member with index 0.
2213     //
2214     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2215     //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
2217     //
2218     // E.g.  A[i+1] = a;     // Member of index 1
2219     //       A[i]   = b;     // Member of index 0
2220     //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2222 
2223     bool InBounds = false;
2224     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2225       InBounds = gep->isInBounds();
2226     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2227     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2228 
2229     // Cast to the vector pointer type.
2230     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2231     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2232     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2233   }
2234 
2235   setDebugLocFromInst(Builder, Instr);
2236   Value *UndefVec = UndefValue::get(VecTy);
2237 
2238   Value *MaskForGaps = nullptr;
2239   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2240     MaskForGaps = createBitMaskForGaps(Builder, VF, *Group);
2241     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2242   }
2243 
2244   // Vectorize the interleaved load group.
2245   if (isa<LoadInst>(Instr)) {
2246     // For each unroll part, create a wide load for the group.
2247     SmallVector<Value *, 2> NewLoads;
2248     for (unsigned Part = 0; Part < UF; Part++) {
2249       Instruction *NewLoad;
2250       if (BlockInMask || MaskForGaps) {
2251         assert(useMaskedInterleavedAccesses(*TTI) &&
2252                "masked interleaved groups are not allowed.");
2253         Value *GroupMask = MaskForGaps;
2254         if (BlockInMask) {
2255           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2256           auto *Undefs = UndefValue::get(BlockInMaskPart->getType());
2257           auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF);
2258           Value *ShuffledMask = Builder.CreateShuffleVector(
2259               BlockInMaskPart, Undefs, RepMask, "interleaved.mask");
2260           GroupMask = MaskForGaps
2261                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2262                                                 MaskForGaps)
2263                           : ShuffledMask;
2264         }
2265         NewLoad =
2266             Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2267                                      GroupMask, UndefVec, "wide.masked.vec");
2268       }
2269       else
2270         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2271                                             Group->getAlign(), "wide.vec");
2272       Group->addMetadata(NewLoad);
2273       NewLoads.push_back(NewLoad);
2274     }
2275 
2276     // For each member in the group, shuffle out the appropriate data from the
2277     // wide loads.
2278     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2279       Instruction *Member = Group->getMember(I);
2280 
2281       // Skip the gaps in the group.
2282       if (!Member)
2283         continue;
2284 
2285       Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF);
2286       for (unsigned Part = 0; Part < UF; Part++) {
2287         Value *StridedVec = Builder.CreateShuffleVector(
2288             NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2289 
        // If this member has a different type, cast the result to its type.
2291         if (Member->getType() != ScalarTy) {
2292           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2293           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2294         }
2295 
2296         if (Group->isReverse())
2297           StridedVec = reverseVector(StridedVec);
2298 
2299         VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
2300       }
2301     }
2302     return;
2303   }
2304 
  // The sub-vector type for the current instruction.
2306   VectorType *SubVT = VectorType::get(ScalarTy, VF);
2307 
2308   // Vectorize the interleaved store group.
2309   for (unsigned Part = 0; Part < UF; Part++) {
2310     // Collect the stored vector from each member.
2311     SmallVector<Value *, 4> StoredVecs;
2312     for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow a gap, so each index has a
      // member.
2314       Instruction *Member = Group->getMember(i);
      assert(Member && "Failed to get a member from an interleaved store group");
2316 
2317       Value *StoredVec = getOrCreateVectorValue(
2318           cast<StoreInst>(Member)->getValueOperand(), Part);
2319       if (Group->isReverse())
2320         StoredVec = reverseVector(StoredVec);
2321 
      // If this member has a different type, cast it to a unified type.
2324       if (StoredVec->getType() != SubVT)
2325         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2326 
2327       StoredVecs.push_back(StoredVec);
2328     }
2329 
2330     // Concatenate all vectors into a wide vector.
2331     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2332 
2333     // Interleave the elements in the wide vector.
2334     Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor);
2335     Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
2336                                               "interleaved.vec");
2337 
2338     Instruction *NewStoreInstr;
2339     if (BlockInMask) {
2340       Value *BlockInMaskPart = State.get(BlockInMask, Part);
2341       auto *Undefs = UndefValue::get(BlockInMaskPart->getType());
2342       auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF);
2343       Value *ShuffledMask = Builder.CreateShuffleVector(
2344           BlockInMaskPart, Undefs, RepMask, "interleaved.mask");
2345       NewStoreInstr = Builder.CreateMaskedStore(
2346           IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
2347     }
2348     else
2349       NewStoreInstr =
2350           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2351 
2352     Group->addMetadata(NewStoreInstr);
2353   }
2354 }
2355 
2356 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
2357                                                      VPTransformState &State,
2358                                                      VPValue *Addr,
2359                                                      VPValue *BlockInMask) {
2360   // Attempt to issue a wide load.
2361   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2362   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2363 
2364   assert((LI || SI) && "Invalid Load/Store instruction");
2365 
2366   LoopVectorizationCostModel::InstWidening Decision =
2367       Cost->getWideningDecision(Instr, VF);
2368   assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
2369          "CM decision should be taken at this point");
2370   if (Decision == LoopVectorizationCostModel::CM_Interleave)
2371     return vectorizeInterleaveGroup(Instr, State, Addr, BlockInMask);
2372 
2373   Type *ScalarDataTy = getMemInstValueType(Instr);
2374   Type *DataTy = VectorType::get(ScalarDataTy, VF);
  // An alignment of 0 means target ABI alignment. We need to use the scalar's
  // target ABI alignment in such a case.
2377   const DataLayout &DL = Instr->getModule()->getDataLayout();
2378   const Align Alignment =
2379       DL.getValueOrABITypeAlignment(getLoadStoreAlignment(Instr), ScalarDataTy);
2380 
2381   // Determine if the pointer operand of the access is either consecutive or
2382   // reverse consecutive.
2383   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2384   bool ConsecutiveStride =
2385       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2386   bool CreateGatherScatter =
2387       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2388 
2389   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2390   // gather/scatter. Otherwise Decision should have been to Scalarize.
2391   assert((ConsecutiveStride || CreateGatherScatter) &&
2392          "The instruction should be scalarized");
2393   (void)ConsecutiveStride;
2394 
2395   VectorParts BlockInMaskParts(UF);
2396   bool isMaskRequired = BlockInMask;
2397   if (isMaskRequired)
2398     for (unsigned Part = 0; Part < UF; ++Part)
2399       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2400 
2401   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2402     // Calculate the pointer for the specific unroll-part.
2403     GetElementPtrInst *PartPtr = nullptr;
2404 
2405     bool InBounds = false;
2406     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2407       InBounds = gep->isInBounds();
2408 
2409     if (Reverse) {
2410       // If the address is consecutive but reversed, then the
2411       // wide store needs to start at the last vector element.
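      // E.g. for VF = 4 and Part = 1, the two GEPs below compute
      // Ptr - 4 - 3 = Ptr - 7, so this wide access covers the elements
      // Ptr[-7] .. Ptr[-4]; the loaded or stored value itself is reversed
      // separately by the caller.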
2412       PartPtr = cast<GetElementPtrInst>(
2413           Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(-Part * VF)));
2414       PartPtr->setIsInBounds(InBounds);
2415       PartPtr = cast<GetElementPtrInst>(
2416           Builder.CreateGEP(ScalarDataTy, PartPtr, Builder.getInt32(1 - VF)));
2417       PartPtr->setIsInBounds(InBounds);
2418       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2419         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2420     } else {
2421       PartPtr = cast<GetElementPtrInst>(
2422           Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(Part * VF)));
2423       PartPtr->setIsInBounds(InBounds);
2424     }
2425 
2426     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2427     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2428   };
2429 
2430   // Handle Stores:
2431   if (SI) {
2432     setDebugLocFromInst(Builder, SI);
2433 
2434     for (unsigned Part = 0; Part < UF; ++Part) {
2435       Instruction *NewSI = nullptr;
2436       Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part);
2437       if (CreateGatherScatter) {
2438         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2439         Value *VectorGep = State.get(Addr, Part);
2440         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2441                                             MaskPart);
2442       } else {
2443         if (Reverse) {
2444           // If we store to reverse consecutive memory locations, then we need
2445           // to reverse the order of elements in the stored value.
2446           StoredVal = reverseVector(StoredVal);
2447           // We don't want to update the value in the map as it might be used in
2448           // another expression. So don't call resetVectorValue(StoredVal).
2449         }
2450         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
2451         if (isMaskRequired)
2452           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2453                                             BlockInMaskParts[Part]);
2454         else
2455           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2456       }
2457       addMetadata(NewSI, SI);
2458     }
2459     return;
2460   }
2461 
2462   // Handle loads.
2463   assert(LI && "Must have a load instruction");
2464   setDebugLocFromInst(Builder, LI);
2465   for (unsigned Part = 0; Part < UF; ++Part) {
2466     Value *NewLI;
2467     if (CreateGatherScatter) {
2468       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2469       Value *VectorGep = State.get(Addr, Part);
2470       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2471                                          nullptr, "wide.masked.gather");
2472       addMetadata(NewLI, LI);
2473     } else {
2474       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
2475       if (isMaskRequired)
2476         NewLI = Builder.CreateMaskedLoad(
2477             VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy),
2478             "wide.masked.load");
2479       else
2480         NewLI =
2481             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
2482 
2483       // Add metadata to the load, but setVectorValue to the reverse shuffle.
2484       addMetadata(NewLI, LI);
2485       if (Reverse)
2486         NewLI = reverseVector(NewLI);
2487     }
2488     VectorLoopValueMap.setVectorValue(Instr, Part, NewLI);
2489   }
2490 }
2491 
2492 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2493                                                const VPIteration &Instance,
2494                                                bool IfPredicateInstr) {
2495   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2496 
2497   setDebugLocFromInst(Builder, Instr);
2498 
2499   // Does this instruction return a value?
2500   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2501 
2502   Instruction *Cloned = Instr->clone();
2503   if (!IsVoidRetTy)
2504     Cloned->setName(Instr->getName() + ".cloned");
2505 
2506   // Replace the operands of the cloned instructions with their scalar
2507   // equivalents in the new loop.
2508   for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
2509     auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance);
2510     Cloned->setOperand(op, NewOp);
2511   }
2512   addNewMetadata(Cloned, Instr);
2513 
2514   // Place the cloned scalar in the new loop.
2515   Builder.Insert(Cloned);
2516 
2517   // Add the cloned scalar to the scalar map entry.
2518   VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
2519 
2520   // If we just cloned a new assumption, add it to the assumption cache.
2521   if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2522     if (II->getIntrinsicID() == Intrinsic::assume)
2523       AC->registerAssumption(II);
2524 
2525   // End if-block.
2526   if (IfPredicateInstr)
2527     PredicatedInstructions.push_back(Cloned);
2528 }
2529 
2530 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2531                                                       Value *End, Value *Step,
2532                                                       Instruction *DL) {
2533   BasicBlock *Header = L->getHeader();
2534   BasicBlock *Latch = L->getLoopLatch();
2535   // As we're just creating this loop, it's possible no latch exists
2536   // yet. If so, use the header as this will be a single block loop.
2537   if (!Latch)
2538     Latch = Header;
2539 
2540   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2541   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
2542   setDebugLocFromInst(Builder, OldInst);
2543   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2544 
2545   Builder.SetInsertPoint(Latch->getTerminator());
2546   setDebugLocFromInst(Builder, OldInst);
2547 
2548   // Create i+1 and fill the PHINode.
2549   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2550   Induction->addIncoming(Start, L->getLoopPreheader());
2551   Induction->addIncoming(Next, Latch);
2552   // Create the compare.
2553   Value *ICmp = Builder.CreateICmpEQ(Next, End);
2554   Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
2555 
2556   // Now we have two terminators. Remove the old one from the block.
2557   Latch->getTerminator()->eraseFromParent();
2558 
2559   return Induction;
2560 }
2561 
2562 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2563   if (TripCount)
2564     return TripCount;
2565 
2566   assert(L && "Create Trip Count for null loop.");
2567   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2568   // Find the loop boundaries.
2569   ScalarEvolution *SE = PSE.getSE();
2570   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2571   assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
2572          "Invalid loop count");
2573 
2574   Type *IdxTy = Legal->getWidestInductionType();
2575   assert(IdxTy && "No type for induction");
2576 
2577   // The exit count might have the type of i64 while the phi is i32. This can
2578   // happen if we have an induction variable that is sign extended before the
2579   // compare. The only way we get a backedge-taken count in that case is if the
2580   // induction variable was signed and, as such, will not overflow. In such a
2581   // case truncation is legal.
2582   if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
2583       IdxTy->getPrimitiveSizeInBits())
2584     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2585   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2586 
2587   // Get the total trip count from the count by adding 1.
2588   const SCEV *ExitCount = SE->getAddExpr(
2589       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2590 
2591   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2592 
2593   // Expand the trip count and place the new instructions in the preheader.
2594   // Notice that the pre-header does not change, only the loop body.
2595   SCEVExpander Exp(*SE, DL, "induction");
2596 
2597   // Count holds the overall loop count (N).
2598   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2599                                 L->getLoopPreheader()->getTerminator());
2600 
2601   if (TripCount->getType()->isPointerTy())
2602     TripCount =
2603         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2604                                     L->getLoopPreheader()->getTerminator());
2605 
2606   return TripCount;
2607 }
2608 
2609 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
2610   if (VectorTripCount)
2611     return VectorTripCount;
2612 
2613   Value *TC = getOrCreateTripCount(L);
2614   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2615 
2616   Type *Ty = TC->getType();
2617   Constant *Step = ConstantInt::get(Ty, VF * UF);
2618 
2619   // If the tail is to be folded by masking, round the number of iterations N
2620   // up to a multiple of Step instead of rounding down. This is done by first
2621   // adding Step-1 and then rounding down. Note that it's ok if this addition
2622   // overflows: the vector induction variable will eventually wrap to zero given
2623   // that it starts at zero and its Step is a power of two; the loop will then
2624   // exit, with the last early-exit vector comparison also producing all-true.
2625   if (Cost->foldTailByMasking()) {
2626     assert(isPowerOf2_32(VF * UF) &&
2627            "VF*UF must be a power of 2 when folding tail by masking");
2628     TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up");
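    // E.g. for N = 10 and VF * UF = 8 this yields TC = 17, so the vector trip
    // count computed below becomes 16 and the masked vector loop executes two
    // iterations, with the lanes beyond iteration 10 masked off.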
2629   }
2630 
2631   // Now we need to generate the expression for the part of the loop that the
2632   // vectorized body will execute. This is equal to N - (N % Step), except when
2633   // a scalar epilogue is required and Step evenly divides N, in which case it
2634   // is N - Step. Step is equal to the vectorization factor (number of SIMD
2635   // elements) times the unroll factor (number of SIMD instructions).
2636   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2637 
2638   // If there is a non-reversed interleaved group that may speculatively access
2639   // memory out-of-bounds, we need to ensure that there will be at least one
2640   // iteration of the scalar epilogue loop. Thus, if the step evenly divides
2641   // the trip count, we set the remainder to be equal to the step. If the step
2642   // does not evenly divide the trip count, no adjustment is necessary since
2643   // there will already be scalar iterations. Note that the minimum iterations
2644   // check ensures that N >= Step.
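  // E.g. for N = 16 and Step = 8, R is bumped from 0 to 8, so the vector loop
  // covers only 8 iterations and the scalar epilogue handles the remaining 8.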
2645   if (VF > 1 && Cost->requiresScalarEpilogue()) {
2646     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2647     R = Builder.CreateSelect(IsZero, Step, R);
2648   }
2649 
2650   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2651 
2652   return VectorTripCount;
2653 }
2654 
2655 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2656                                                    const DataLayout &DL) {
2657   // Verify that V is a vector type with same number of elements as DstVTy.
2658   unsigned VF = DstVTy->getNumElements();
2659   VectorType *SrcVecTy = cast<VectorType>(V->getType());
2660   assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
2661   Type *SrcElemTy = SrcVecTy->getElementType();
2662   Type *DstElemTy = DstVTy->getElementType();
2663   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2664          "Vector elements must have same size");
2665 
2666   // Do a direct cast if element types are castable.
2667   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2668     return Builder.CreateBitOrPointerCast(V, DstVTy);
2669   }
2670   // V cannot be directly cast to the desired vector type.
2671   // This may happen when V is a floating point vector but DstVTy is a vector
2672   // of pointers or vice-versa. Handle this with a two-step bitcast through an
2673   // intermediate integer type, i.e. Ptr <-> Int <-> Float.
2674   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2675          "Only one type should be a pointer type");
2676   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2677          "Only one type should be a floating point type");
2678   Type *IntTy =
2679       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2680   VectorType *VecIntTy = VectorType::get(IntTy, VF);
2681   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2682   return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
2683 }
2684 
2685 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2686                                                          BasicBlock *Bypass) {
2687   Value *Count = getOrCreateTripCount(L);
2688   // Reuse existing vector loop preheader for TC checks.
2689   // Note that a new preheader block is generated for the vector loop.
2690   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
2691   IRBuilder<> Builder(TCCheckBlock->getTerminator());
2692 
2693   // Generate code to check if the loop's trip count is less than VF * UF, or
2694   // equal to it in case a scalar epilogue is required; this implies that the
2695   // vector trip count is zero. This check also covers the case where adding one
2696   // to the backedge-taken count overflowed leading to an incorrect trip count
2697   // of zero. In this case we will also jump to the scalar loop.
2698   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
2699                                           : ICmpInst::ICMP_ULT;
2700 
2701   // If tail is to be folded, vector loop takes care of all iterations.
2702   Value *CheckMinIters = Builder.getFalse();
2703   if (!Cost->foldTailByMasking())
2704     CheckMinIters = Builder.CreateICmp(
2705         P, Count, ConstantInt::get(Count->getType(), VF * UF),
2706         "min.iters.check");
2707 
2708   // Create new preheader for vector loop.
2709   LoopVectorPreHeader =
2710       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
2711                  "vector.ph");
2712 
2713   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
2714                                DT->getNode(Bypass)->getIDom()) &&
2715          "TC check is expected to dominate Bypass");
2716 
2717   // Update dominator for Bypass & LoopExit.
2718   DT->changeImmediateDominator(Bypass, TCCheckBlock);
2719   DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
2720 
2721   ReplaceInstWithInst(
2722       TCCheckBlock->getTerminator(),
2723       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
2724   LoopBypassBlocks.push_back(TCCheckBlock);
2725 }
2726 
2727 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
2728   // Reuse existing vector loop preheader for SCEV checks.
2729   // Note that a new preheader block is generated for the vector loop.
2730   BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader;
2731 
2732   // Generate the code to check the SCEV assumptions that we made.
2733   // We want the new basic block to start at the first instruction in a
2734   // sequence of instructions that form a check.
2735   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
2736                    "scev.check");
2737   Value *SCEVCheck = Exp.expandCodeForPredicate(
2738       &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator());
2739 
2740   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
2741     if (C->isZero())
2742       return;
2743 
2744   assert(!SCEVCheckBlock->getParent()->hasOptSize() &&
2745          "Cannot SCEV check stride or overflow when optimizing for size");
2746 
2747   SCEVCheckBlock->setName("vector.scevcheck");
2748   // Create new preheader for vector loop.
2749   LoopVectorPreHeader =
2750       SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI,
2751                  nullptr, "vector.ph");
2752 
2753   // Update dominator only if this is the first RT check.
2754   if (LoopBypassBlocks.empty()) {
2755     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
2756     DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
2757   }
2758 
2759   ReplaceInstWithInst(
2760       SCEVCheckBlock->getTerminator(),
2761       BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck));
2762   LoopBypassBlocks.push_back(SCEVCheckBlock);
2763   AddedSafetyChecks = true;
2764 }
2765 
2766 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
2767   // VPlan-native path does not do any analysis for runtime checks currently.
2768   if (EnableVPlanNativePath)
2769     return;
2770 
2771   // Reuse existing vector loop preheader for runtime memory checks.
2772   // Note that a new preheader block is generated for the vector loop.
2773   BasicBlock *const MemCheckBlock = L->getLoopPreheader();
2774 
2775   // Generate the code that checks at runtime whether arrays overlap. We put
2776   // the checks into a separate block to make the more common case of few
2777   // elements faster.
2778   Instruction *FirstCheckInst;
2779   Instruction *MemRuntimeCheck;
2780   std::tie(FirstCheckInst, MemRuntimeCheck) =
2781       Legal->getLAI()->addRuntimeChecks(MemCheckBlock->getTerminator());
2782   if (!MemRuntimeCheck)
2783     return;
2784 
2785   if (MemCheckBlock->getParent()->hasOptSize()) {
2786     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
2787            "Cannot emit memory checks when optimizing for size, unless forced "
2788            "to vectorize.");
2789     ORE->emit([&]() {
2790       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
2791                                         L->getStartLoc(), L->getHeader())
2792              << "Code-size may be reduced by not forcing "
2793                 "vectorization, or by source-code modifications "
2794                 "eliminating the need for runtime checks "
2795                 "(e.g., adding 'restrict').";
2796     });
2797   }
2798 
2799   MemCheckBlock->setName("vector.memcheck");
2800   // Create new preheader for vector loop.
2801   LoopVectorPreHeader =
2802       SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr,
2803                  "vector.ph");
2804 
2805   // Update dominator only if this is the first RT check.
2806   if (LoopBypassBlocks.empty()) {
2807     DT->changeImmediateDominator(Bypass, MemCheckBlock);
2808     DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock);
2809   }
2810 
2811   ReplaceInstWithInst(
2812       MemCheckBlock->getTerminator(),
2813       BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheck));
2814   LoopBypassBlocks.push_back(MemCheckBlock);
2815   AddedSafetyChecks = true;
2816 
2817   // We currently don't use LoopVersioning for the actual loop cloning but we
2818   // still use it to add the noalias metadata.
2819   LVer = std::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
2820                                           PSE.getSE());
2821   LVer->prepareNoAliasMetadata();
2822 }
2823 
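// Compute the value of Index transformed by the given induction descriptor:
// Start + Index * Step for an integer induction, a GEP from Start advanced by
// Index * Step elements for a pointer induction, and Start fadd/fsub
// Index * Step for a floating-point induction.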
2824 Value *InnerLoopVectorizer::emitTransformedIndex(
2825     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
2826     const InductionDescriptor &ID) const {
2827 
2828   SCEVExpander Exp(*SE, DL, "induction");
2829   auto Step = ID.getStep();
2830   auto StartValue = ID.getStartValue();
2831   assert(Index->getType() == Step->getType() &&
2832          "Index type does not match StepValue type");
2833 
2834   // Note: the IR at this point is broken. We cannot use SE to create any new
2835   // SCEV and then expand it, hoping that SCEV's simplification will give us
2836   // more optimal code. Unfortunately, attempting to do so on invalid IR may
2837   // lead to various SCEV crashes. So all we can do is use the builder and rely
2838   // on InstCombine for future simplifications. Here we handle only some
2839   // trivial cases.
2840   auto CreateAdd = [&B](Value *X, Value *Y) {
2841     assert(X->getType() == Y->getType() && "Types don't match!");
2842     if (auto *CX = dyn_cast<ConstantInt>(X))
2843       if (CX->isZero())
2844         return Y;
2845     if (auto *CY = dyn_cast<ConstantInt>(Y))
2846       if (CY->isZero())
2847         return X;
2848     return B.CreateAdd(X, Y);
2849   };
2850 
2851   auto CreateMul = [&B](Value *X, Value *Y) {
2852     assert(X->getType() == Y->getType() && "Types don't match!");
2853     if (auto *CX = dyn_cast<ConstantInt>(X))
2854       if (CX->isOne())
2855         return Y;
2856     if (auto *CY = dyn_cast<ConstantInt>(Y))
2857       if (CY->isOne())
2858         return X;
2859     return B.CreateMul(X, Y);
2860   };
2861 
2862   switch (ID.getKind()) {
2863   case InductionDescriptor::IK_IntInduction: {
2864     assert(Index->getType() == StartValue->getType() &&
2865            "Index type does not match StartValue type");
2866     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
2867       return B.CreateSub(StartValue, Index);
2868     auto *Offset = CreateMul(
2869         Index, Exp.expandCodeFor(Step, Index->getType(), &*B.GetInsertPoint()));
2870     return CreateAdd(StartValue, Offset);
2871   }
2872   case InductionDescriptor::IK_PtrInduction: {
2873     assert(isa<SCEVConstant>(Step) &&
2874            "Expected constant step for pointer induction");
2875     return B.CreateGEP(
2876         StartValue->getType()->getPointerElementType(), StartValue,
2877         CreateMul(Index, Exp.expandCodeFor(Step, Index->getType(),
2878                                            &*B.GetInsertPoint())));
2879   }
2880   case InductionDescriptor::IK_FpInduction: {
2881     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2882     auto InductionBinOp = ID.getInductionBinOp();
2883     assert(InductionBinOp &&
2884            (InductionBinOp->getOpcode() == Instruction::FAdd ||
2885             InductionBinOp->getOpcode() == Instruction::FSub) &&
2886            "Original bin op should be defined for FP induction");
2887 
2888     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
2889 
2890     // Floating point operations had to be 'fast' to enable the induction.
2891     FastMathFlags Flags;
2892     Flags.setFast();
2893 
2894     Value *MulExp = B.CreateFMul(StepValue, Index);
2895     if (isa<Instruction>(MulExp))
2896       // We have to check because MulExp may have been folded to a constant.
2897       cast<Instruction>(MulExp)->setFastMathFlags(Flags);
2898 
2899     Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2900                                "induction");
2901     if (isa<Instruction>(BOp))
2902       cast<Instruction>(BOp)->setFastMathFlags(Flags);
2903 
2904     return BOp;
2905   }
2906   case InductionDescriptor::IK_NoInduction:
2907     return nullptr;
2908   }
2909   llvm_unreachable("invalid enum");
2910 }
2911 
2912 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
2913   /*
2914    In this function we generate a new loop. The new loop will contain
2915    the vectorized instructions while the old loop will continue to run the
2916    scalar remainder.
2917 
2918        [ ] <-- loop iteration number check.
2919     /   |
2920    /    v
2921   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
2922   |  /  |
2923   | /   v
2924   ||   [ ]     <-- vector pre header.
2925   |/    |
2926   |     v
2927   |    [  ] \
2928   |    [  ]_|   <-- vector loop.
2929   |     |
2930   |     v
2931   |   -[ ]   <--- middle-block.
2932   |  /  |
2933   | /   v
2934   -|- >[ ]     <--- new preheader.
2935    |    |
2936    |    v
2937    |   [ ] \
2938    |   [ ]_|   <-- old scalar loop to handle remainder.
2939     \   |
2940      \  v
2941       >[ ]     <-- exit block.
2942    ...
2943    */
2944 
2945   MDNode *OrigLoopID = OrigLoop->getLoopID();
2946 
2947   // Some loops have a single integer induction variable, while other loops
2948   // don't. One example is C++ iterators, which often have multiple pointer
2949   // induction variables. In the code below we also support a case where we
2950   // don't have a single induction variable.
2951   //
2952   // We try to obtain an induction variable from the original loop as hard
2953   // as possible. However if we don't find one that:
2954   //   - is an integer
2955   //   - counts from zero, stepping by one
2956   //   - is the size of the widest induction variable type
2957   // then we create a new one.
2958   OldInduction = Legal->getPrimaryInduction();
2959   Type *IdxTy = Legal->getWidestInductionType();
2960 
2961   // Split the single block loop into the two loop structure described above.
2962   LoopScalarBody = OrigLoop->getHeader();
2963   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
2964   LoopExitBlock = OrigLoop->getExitBlock();
2965   assert(LoopExitBlock && "Must have an exit block");
2966   assert(LoopVectorPreHeader && "Invalid loop structure");
2967 
2968   LoopMiddleBlock =
2969       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
2970                  LI, nullptr, "middle.block");
2971   LoopScalarPreHeader =
2972       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
2973                  nullptr, "scalar.ph");
2974   // We intentionally don't let SplitBlock update LoopInfo since
2975   // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
2976   // LoopVectorBody is explicitly added to the correct place a few lines later.
2977   LoopVectorBody =
2978       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
2979                  nullptr, nullptr, "vector.body");
2980 
2981   // Update dominator for loop exit.
2982   DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
2983 
2984   // Create and register the new vector loop.
2985   Loop *Lp = LI->AllocateLoop();
2986   Loop *ParentLoop = OrigLoop->getParentLoop();
2987 
2988   // Insert the new loop into the loop nest and register the new basic blocks
2989   // before calling any utilities such as SCEV that require valid LoopInfo.
2990   if (ParentLoop) {
2991     ParentLoop->addChildLoop(Lp);
2992   } else {
2993     LI->addTopLevelLoop(Lp);
2994   }
2995   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
2996 
2997   // Find the loop boundaries.
2998   Value *Count = getOrCreateTripCount(Lp);
2999 
3000   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3001 
3002   // Now, compare the new count to zero. If it is zero skip the vector loop and
3003   // jump to the scalar loop. This check also covers the case where the
3004   // backedge-taken count is uint##_max: adding one to it will overflow leading
3005   // to an incorrect trip count of zero. In this (rare) case we will also jump
3006   // to the scalar loop.
3007   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3008 
3009   // Generate the code to check any assumptions that we've made for SCEV
3010   // expressions.
3011   emitSCEVChecks(Lp, LoopScalarPreHeader);
3012 
3013   // Generate the code that checks at runtime whether arrays overlap. We put
3014   // the checks into a separate block to make the more common case of few
3015   // elements faster.
3016   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3017 
3018   // Generate the induction variable.
3019   // The loop step is equal to the vectorization factor (num of SIMD elements)
3020   // times the unroll factor (num of SIMD instructions).
3021   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3022   Constant *Step = ConstantInt::get(IdxTy, VF * UF);
3023   Induction =
3024       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3025                               getDebugLocFromInstOrOperands(OldInduction));
3026 
3027   // We are going to resume the execution of the scalar loop.
3028   // Go over all of the induction variables that we found and fix the
3029   // PHIs that are left in the scalar version of the loop.
3030   // The starting values of PHI nodes depend on the counter of the last
3031   // iteration in the vectorized loop.
3032   // If we come from a bypass edge then we need to start from the original
3033   // start value.
3034 
3035   // This variable saves the new starting index for the scalar loop. It is used
3036   // to test if there are any tail iterations left once the vector loop has
3037   // completed.
3038   LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
3039   for (auto &InductionEntry : *List) {
3040     PHINode *OrigPhi = InductionEntry.first;
3041     InductionDescriptor II = InductionEntry.second;
3042 
3043     // Create phi nodes to merge from the backedge-taken check block.
3044     PHINode *BCResumeVal =
3045         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3046                         LoopScalarPreHeader->getTerminator());
3047     // Copy original phi DL over to the new one.
3048     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3049     Value *&EndValue = IVEndValues[OrigPhi];
3050     if (OrigPhi == OldInduction) {
3051       // We know what the end value is.
3052       EndValue = CountRoundDown;
3053     } else {
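      // For the other inductions, compute the end value as the transformed
      // index at CountRoundDown, i.e. Start + CountRoundDown * Step for an
      // integer induction.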
3054       IRBuilder<> B(Lp->getLoopPreheader()->getTerminator());
3055       Type *StepType = II.getStep()->getType();
3056       Instruction::CastOps CastOp =
3057           CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
3058       Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
3059       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3060       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3061       EndValue->setName("ind.end");
3062     }
3063 
3064     // The new PHI merges the original incoming value, in case of a bypass,
3065     // or the value at the end of the vectorized loop.
3066     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3067 
3068     // Fix the scalar body counter (PHI node).
3069     // The old induction's phi node in the scalar body needs the truncated
3070     // value.
3071     for (BasicBlock *BB : LoopBypassBlocks)
3072       BCResumeVal->addIncoming(II.getStartValue(), BB);
3073     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3074   }
3075 
3076   // We need the OrigLoop (scalar loop part) latch terminator to help
3077   // produce correct debug info for the middle block BB instructions.
3078   // The legality check stage guarantees that the loop will have a single
3079   // latch.
3080   assert(isa<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()) &&
3081          "Scalar loop latch terminator isn't a branch");
3082   BranchInst *ScalarLatchBr =
3083       cast<BranchInst>(OrigLoop->getLoopLatch()->getTerminator());
3084 
3085   // Add a check in the middle block to see if we have completed
3086   // all of the iterations in the first vector loop.
3087   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3088   // If tail is to be folded, we know we don't need to run the remainder.
3089   Value *CmpN = Builder.getTrue();
3090   if (!Cost->foldTailByMasking()) {
3091     CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
3092                            CountRoundDown, "cmp.n",
3093                            LoopMiddleBlock->getTerminator());
3094 
3095     // Here we use the same DebugLoc as the scalar loop latch branch instead
3096     // of the corresponding compare because they may have ended up with
3097     // different line numbers and we want to avoid awkward line stepping while
3098     // debugging, e.g. if the compare got a line number inside the loop.
3099     cast<Instruction>(CmpN)->setDebugLoc(ScalarLatchBr->getDebugLoc());
3100   }
3101 
3102   BranchInst *BrInst =
3103       BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, CmpN);
3104   BrInst->setDebugLoc(ScalarLatchBr->getDebugLoc());
3105   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3106 
3107   // Get ready to start creating new instructions into the vectorized body.
3108   assert(LoopVectorPreHeader == Lp->getLoopPreheader() &&
3109          "Inconsistent vector loop preheader");
3110   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3111 
3112   Optional<MDNode *> VectorizedLoopID =
3113       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3114                                       LLVMLoopVectorizeFollowupVectorized});
3115   if (VectorizedLoopID.hasValue()) {
3116     Lp->setLoopID(VectorizedLoopID.getValue());
3117 
3118     // Do not setAlreadyVectorized if loop attributes have been defined
3119     // explicitly.
3120     return LoopVectorPreHeader;
3121   }
3122 
3123   // Keep all loop hints from the original loop on the vector loop (we'll
3124   // replace the vectorizer-specific hints below).
3125   if (MDNode *LID = OrigLoop->getLoopID())
3126     Lp->setLoopID(LID);
3127 
3128   LoopVectorizeHints Hints(Lp, true, *ORE);
3129   Hints.setAlreadyVectorized();
3130 
3131 #ifdef EXPENSIVE_CHECKS
3132   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3133   LI->verify(*DT);
3134 #endif
3135 
3136   return LoopVectorPreHeader;
3137 }
3138 
3139 // Fix up external users of the induction variable. At this point, we are
3140 // in LCSSA form, with all external PHIs that use the IV having one input value,
3141 // coming from the remainder loop. We need those PHIs to also have a correct
3142 // value for the IV when arriving directly from the middle block.
3143 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3144                                        const InductionDescriptor &II,
3145                                        Value *CountRoundDown, Value *EndValue,
3146                                        BasicBlock *MiddleBlock) {
3147   // There are two kinds of external IV usages - those that use the value
3148   // computed in the last iteration (the PHI) and those that use the penultimate
3149   // value (the value that feeds into the phi from the loop latch).
3150   // We allow both, but they, obviously, have different values.
3151 
3152   assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3153 
3154   DenseMap<Value *, Value *> MissingVals;
3155 
3156   // An external user of the last iteration's value should see the value that
3157   // the remainder loop uses to initialize its own IV.
3158   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3159   for (User *U : PostInc->users()) {
3160     Instruction *UI = cast<Instruction>(U);
3161     if (!OrigLoop->contains(UI)) {
3162       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3163       MissingVals[UI] = EndValue;
3164     }
3165   }
3166 
3167   // An external user of the penultimate value needs to see EndValue - Step.
3168   // The simplest way to get this is to recompute it from the constituent SCEVs,
3169   // that is Start + (Step * (CRD - 1)).
3170   for (User *U : OrigPhi->users()) {
3171     auto *UI = cast<Instruction>(U);
3172     if (!OrigLoop->contains(UI)) {
3173       const DataLayout &DL =
3174           OrigLoop->getHeader()->getModule()->getDataLayout();
3175       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3176 
3177       IRBuilder<> B(MiddleBlock->getTerminator());
3178       Value *CountMinusOne = B.CreateSub(
3179           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3180       Value *CMO =
3181           !II.getStep()->getType()->isIntegerTy()
3182               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3183                              II.getStep()->getType())
3184               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3185       CMO->setName("cast.cmo");
3186       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3187       Escape->setName("ind.escape");
3188       MissingVals[UI] = Escape;
3189     }
3190   }
3191 
3192   for (auto &I : MissingVals) {
3193     PHINode *PHI = cast<PHINode>(I.first);
3194     // One corner case we have to handle is two IVs "chasing" each other,
3195     // that is %IV2 = phi [...], [ %IV1, %latch ]
3196     // In this case, if IV1 has an external use, we need to avoid adding both
3197     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3198     // don't already have an incoming value for the middle block.
3199     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3200       PHI->addIncoming(I.second, MiddleBlock);
3201   }
3202 }
3203 
3204 namespace {
3205 
3206 struct CSEDenseMapInfo {
3207   static bool canHandle(const Instruction *I) {
3208     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3209            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3210   }
3211 
3212   static inline Instruction *getEmptyKey() {
3213     return DenseMapInfo<Instruction *>::getEmptyKey();
3214   }
3215 
3216   static inline Instruction *getTombstoneKey() {
3217     return DenseMapInfo<Instruction *>::getTombstoneKey();
3218   }
3219 
3220   static unsigned getHashValue(const Instruction *I) {
3221     assert(canHandle(I) && "Unknown instruction!");
3222     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3223                                                            I->value_op_end()));
3224   }
3225 
3226   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3227     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3228         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3229       return LHS == RHS;
3230     return LHS->isIdenticalTo(RHS);
3231   }
3232 };
3233 
3234 } // end anonymous namespace
3235 
3236 /// Perform CSE of induction variable instructions.
3237 static void cse(BasicBlock *BB) {
3238   // Perform simple CSE.
3239   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3240   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3241     Instruction *In = &*I++;
3242 
3243     if (!CSEDenseMapInfo::canHandle(In))
3244       continue;
3245 
3246     // Check if we can replace this instruction with any of the
3247     // visited instructions.
3248     if (Instruction *V = CSEMap.lookup(In)) {
3249       In->replaceAllUsesWith(V);
3250       In->eraseFromParent();
3251       continue;
3252     }
3253 
3254     CSEMap[In] = In;
3255   }
3256 }
3257 
3258 unsigned LoopVectorizationCostModel::getVectorCallCost(CallInst *CI,
3259                                                        unsigned VF,
3260                                                        bool &NeedToScalarize) {
3261   Function *F = CI->getCalledFunction();
3262   Type *ScalarRetTy = CI->getType();
3263   SmallVector<Type *, 4> Tys, ScalarTys;
3264   for (auto &ArgOp : CI->arg_operands())
3265     ScalarTys.push_back(ArgOp->getType());
3266 
3267   // Estimate cost of scalarized vector call. The source operands are assumed
3268   // to be vectors, so we need to extract individual elements from there,
3269   // execute VF scalar calls, and then gather the result into the vector return
3270   // value.
3271   unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
3272   if (VF == 1)
3273     return ScalarCallCost;
3274 
3275   // Compute corresponding vector type for return value and arguments.
3276   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3277   for (Type *ScalarTy : ScalarTys)
3278     Tys.push_back(ToVectorTy(ScalarTy, VF));
3279 
3280   // Compute costs of unpacking argument values for the scalar calls and
3281   // packing the return values to a vector.
3282   unsigned ScalarizationCost = getScalarizationOverhead(CI, VF);
3283 
3284   unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
3285 
3286   // If we can't emit a vector call for this function, then the currently found
3287   // cost is the cost we need to return.
3288   NeedToScalarize = true;
3289   if (!TLI || CI->isNoBuiltin() || VFDatabase::getMappings(*CI).empty())
3290     return Cost;
3291 
3292   // If the corresponding vector cost is cheaper, return its cost.
3293   unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
3294   if (VectorCallCost < Cost) {
3295     NeedToScalarize = false;
3296     return VectorCallCost;
3297   }
3298   return Cost;
3299 }
3300 
3301 unsigned LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3302                                                             unsigned VF) {
3303   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3304   assert(ID && "Expected intrinsic call!");
3305 
3306   FastMathFlags FMF;
3307   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3308     FMF = FPMO->getFastMathFlags();
3309 
3310   SmallVector<Value *, 4> Operands(CI->arg_operands());
3311   return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF);
3312 }
3313 
3314 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3315   auto *I1 = cast<IntegerType>(T1->getVectorElementType());
3316   auto *I2 = cast<IntegerType>(T2->getVectorElementType());
3317   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3318 }
3319 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3320   auto *I1 = cast<IntegerType>(T1->getVectorElementType());
3321   auto *I2 = cast<IntegerType>(T2->getVectorElementType());
3322   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3323 }
3324 
3325 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
3326   // For every instruction `I` in MinBWs, truncate the operands, create a
3327   // truncated version of `I` and re-extend its result. InstCombine runs
3328   // later and will remove any ext/trunc pairs.
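  // E.g. an add recorded in MinBWs with a width of 8 that was widened to
  // <4 x i32> gets its operands truncated to <4 x i8>, is re-created in i8,
  // and has its result zero-extended back to <4 x i32>.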
3329   SmallPtrSet<Value *, 4> Erased;
3330   for (const auto &KV : Cost->getMinimalBitwidths()) {
3331     // If the value wasn't vectorized, we must maintain the original scalar
3332     // type. The absence of the value from VectorLoopValueMap indicates that it
3333     // wasn't vectorized.
3334     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3335       continue;
3336     for (unsigned Part = 0; Part < UF; ++Part) {
3337       Value *I = getOrCreateVectorValue(KV.first, Part);
3338       if (Erased.find(I) != Erased.end() || I->use_empty() ||
3339           !isa<Instruction>(I))
3340         continue;
3341       Type *OriginalTy = I->getType();
3342       Type *ScalarTruncatedTy =
3343           IntegerType::get(OriginalTy->getContext(), KV.second);
3344       Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
3345                                           OriginalTy->getVectorNumElements());
3346       if (TruncatedTy == OriginalTy)
3347         continue;
3348 
3349       IRBuilder<> B(cast<Instruction>(I));
3350       auto ShrinkOperand = [&](Value *V) -> Value * {
3351         if (auto *ZI = dyn_cast<ZExtInst>(V))
3352           if (ZI->getSrcTy() == TruncatedTy)
3353             return ZI->getOperand(0);
3354         return B.CreateZExtOrTrunc(V, TruncatedTy);
3355       };
3356 
3357       // The actual instruction modification depends on the instruction type,
3358       // unfortunately.
3359       Value *NewI = nullptr;
3360       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3361         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3362                              ShrinkOperand(BO->getOperand(1)));
3363 
3364         // Any wrapping introduced by shrinking this operation shouldn't be
3365         // considered undefined behavior. So, we can't unconditionally copy
3366         // arithmetic wrapping flags to NewI.
3367         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3368       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3369         NewI =
3370             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3371                          ShrinkOperand(CI->getOperand(1)));
3372       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3373         NewI = B.CreateSelect(SI->getCondition(),
3374                               ShrinkOperand(SI->getTrueValue()),
3375                               ShrinkOperand(SI->getFalseValue()));
3376       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3377         switch (CI->getOpcode()) {
3378         default:
3379           llvm_unreachable("Unhandled cast!");
3380         case Instruction::Trunc:
3381           NewI = ShrinkOperand(CI->getOperand(0));
3382           break;
3383         case Instruction::SExt:
3384           NewI = B.CreateSExtOrTrunc(
3385               CI->getOperand(0),
3386               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3387           break;
3388         case Instruction::ZExt:
3389           NewI = B.CreateZExtOrTrunc(
3390               CI->getOperand(0),
3391               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3392           break;
3393         }
3394       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3395         auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
3396         auto *O0 = B.CreateZExtOrTrunc(
3397             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3398         auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
3399         auto *O1 = B.CreateZExtOrTrunc(
3400             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3401 
3402         NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
3403       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3404         // Don't do anything with the operands, just extend the result.
3405         continue;
3406       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3407         auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
3408         auto *O0 = B.CreateZExtOrTrunc(
3409             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3410         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3411         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3412       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3413         auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
3414         auto *O0 = B.CreateZExtOrTrunc(
3415             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3416         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3417       } else {
3418         // If we don't know what to do, be conservative and don't do anything.
3419         continue;
3420       }
3421 
3422       // Lastly, extend the result.
3423       NewI->takeName(cast<Instruction>(I));
3424       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3425       I->replaceAllUsesWith(Res);
3426       cast<Instruction>(I)->eraseFromParent();
3427       Erased.insert(I);
3428       VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
3429     }
3430   }
3431 
3432   // We'll have created a bunch of ZExts that are now dead. Clean them up.
3433   for (const auto &KV : Cost->getMinimalBitwidths()) {
3434     // If the value wasn't vectorized, we must maintain the original scalar
3435     // type. The absence of the value from VectorLoopValueMap indicates that it
3436     // wasn't vectorized.
3437     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3438       continue;
3439     for (unsigned Part = 0; Part < UF; ++Part) {
3440       Value *I = getOrCreateVectorValue(KV.first, Part);
3441       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3442       if (Inst && Inst->use_empty()) {
3443         Value *NewI = Inst->getOperand(0);
3444         Inst->eraseFromParent();
3445         VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3446       }
3447     }
3448   }
3449 }
3450 
3451 void InnerLoopVectorizer::fixVectorizedLoop() {
3452   // Insert truncates and extends for any truncated instructions as hints to
3453   // InstCombine.
3454   if (VF > 1)
3455     truncateToMinimalBitwidths();
3456 
3457   // Fix widened non-induction PHIs by setting up the PHI operands.
3458   if (OrigPHIsToFix.size()) {
3459     assert(EnableVPlanNativePath &&
3460            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3461     fixNonInductionPHIs();
3462   }
3463 
3464   // At this point every instruction in the original loop is widened to a
3465   // vector form. Now we need to fix the recurrences in the loop. These PHI
3466   // nodes are currently empty because we did not want to introduce cycles.
3467   // This is the second stage of vectorizing recurrences.
3468   fixCrossIterationPHIs();
3469 
3470   // Forget the original basic block.
3471   PSE.getSE()->forgetLoop(OrigLoop);
3472 
3473   // Fix-up external users of the induction variables.
3474   for (auto &Entry : *Legal->getInductionVars())
3475     fixupIVUsers(Entry.first, Entry.second,
3476                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3477                  IVEndValues[Entry.first], LoopMiddleBlock);
3478 
3479   fixLCSSAPHIs();
3480   for (Instruction *PI : PredicatedInstructions)
3481     sinkScalarOperands(&*PI);
3482 
3483   // Remove redundant induction instructions.
3484   cse(LoopVectorBody);
3485 
3486   // Set/update profile weights for the vector and remainder loops as original
3487   // loop iterations are now distributed among them. Note that original loop
3488   // represented by LoopScalarBody becomes remainder loop after vectorization.
3489   //
3490   // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
3491   // end up with a slightly less accurate result, but that should be OK since
3492   // the profile is not inherently precise anyway. Note also that a possible
3493   // bypass of the vector code caused by legality checks is ignored, assigning
3494   // all the weight to the vector loop, optimistically.
3495   setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody),
3496                                LI->getLoopFor(LoopVectorBody),
3497                                LI->getLoopFor(LoopScalarBody), VF * UF);
3498 }
3499 
3500 void InnerLoopVectorizer::fixCrossIterationPHIs() {
3501   // In order to support recurrences we need to be able to vectorize Phi nodes.
3502   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3503   // stage #2: We now need to fix the recurrences by adding incoming edges to
3504   // the currently empty PHI nodes. At this point every instruction in the
3505   // original loop is widened to a vector form so we can use them to construct
3506   // the incoming edges.
3507   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
3508     // Handle first-order recurrences and reductions that need to be fixed.
3509     if (Legal->isFirstOrderRecurrence(&Phi))
3510       fixFirstOrderRecurrence(&Phi);
3511     else if (Legal->isReductionVariable(&Phi))
3512       fixReduction(&Phi);
3513   }
3514 }
3515 
3516 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3517   // This is the second phase of vectorizing first-order recurrences. An
3518   // overview of the transformation is described below. Suppose we have the
3519   // following loop.
3520   //
3521   //   for (int i = 0; i < n; ++i)
3522   //     b[i] = a[i] - a[i - 1];
3523   //
3524   // There is a first-order recurrence on "a". For this loop, the shorthand
3525   // scalar IR looks like:
3526   //
3527   //   scalar.ph:
3528   //     s_init = a[-1]
3529   //     br scalar.body
3530   //
3531   //   scalar.body:
3532   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3533   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3534   //     s2 = a[i]
3535   //     b[i] = s2 - s1
3536   //     br cond, scalar.body, ...
3537   //
3538   // In this example, s1 is a recurrence because its value depends on the
3539   // previous iteration. In the first phase of vectorization, we created a
3540   // temporary value for s1. We now complete the vectorization and produce the
3541   // shorthand vector IR shown below (for VF = 4, UF = 1).
3542   //
3543   //   vector.ph:
3544   //     v_init = vector(..., ..., ..., a[-1])
3545   //     br vector.body
3546   //
3547   //   vector.body
3548   //     i = phi [0, vector.ph], [i+4, vector.body]
3549   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3550   //     v2 = a[i, i+1, i+2, i+3];
3551   //     v3 = vector(v1(3), v2(0, 1, 2))
3552   //     b[i, i+1, i+2, i+3] = v2 - v3
3553   //     br cond, vector.body, middle.block
3554   //
3555   //   middle.block:
3556   //     x = v2(3)
3557   //     br scalar.ph
3558   //
3559   //   scalar.ph:
3560   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3561   //     br scalar.body
3562   //
3563   // After the vector loop completes execution, we extract the next value of
3564   // the recurrence (x) to use as the initial value in the scalar loop.
3565 
3566   // Get the original loop preheader and single loop latch.
3567   auto *Preheader = OrigLoop->getLoopPreheader();
3568   auto *Latch = OrigLoop->getLoopLatch();
3569 
3570   // Get the initial and previous values of the scalar recurrence.
3571   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3572   auto *Previous = Phi->getIncomingValueForBlock(Latch);
3573 
3574   // Create a vector from the initial value.
3575   auto *VectorInit = ScalarInit;
3576   if (VF > 1) {
3577     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3578     VectorInit = Builder.CreateInsertElement(
3579         UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
3580         Builder.getInt32(VF - 1), "vector.recur.init");
3581   }
3582 
3583   // We constructed a temporary phi node in the first phase of vectorization.
3584   // This phi node will eventually be deleted.
3585   Builder.SetInsertPoint(
3586       cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
3587 
3588   // Create a phi node for the new recurrence. The current value will either be
3589   // the initial value inserted into a vector or loop-varying vector value.
3590   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
3591   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
3592 
3593   // Get the vectorized previous value of the last part UF - 1. It appears last
3594   // among all unrolled iterations, due to the order of their construction.
3595   Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
3596 
3597   // Find and set the insertion point after the previous value if it is an
3598   // instruction.
3599   BasicBlock::iterator InsertPt;
3600   // Note that the previous value may have been constant-folded so it is not
3601   // guaranteed to be an instruction in the vector loop.
3602   // FIXME: Loop invariant values do not form recurrences. We should deal with
3603   //        them earlier.
3604   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
3605     InsertPt = LoopVectorBody->getFirstInsertionPt();
3606   else {
3607     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
3608     if (isa<PHINode>(PreviousLastPart))
3609       // If the previous value is a phi node, we should insert after all the phi
3610       // nodes in the block containing the PHI to avoid breaking basic block
3611       // verification. Note that the basic block may be different to
3612       // LoopVectorBody, in case we predicate the loop.
3613       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
3614     else
3615       InsertPt = ++PreviousInst->getIterator();
3616   }
3617   Builder.SetInsertPoint(&*InsertPt);
3618 
3619   // We will construct a vector for the recurrence by combining the values for
3620   // the current and previous iterations. This is the required shuffle mask.
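  // E.g. for VF = 4 the mask is <3, 4, 5, 6>: the last lane of the first
  // vector followed by the first three lanes of the second one, matching
  // v3 = vector(v1(3), v2(0, 1, 2)) in the example above.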
3621   SmallVector<Constant *, 8> ShuffleMask(VF);
3622   ShuffleMask[0] = Builder.getInt32(VF - 1);
3623   for (unsigned I = 1; I < VF; ++I)
3624     ShuffleMask[I] = Builder.getInt32(I + VF - 1);
3625 
3626   // The vector from which to take the initial value for the current iteration
3627   // (actual or unrolled). Initially, this is the vector phi node.
3628   Value *Incoming = VecPhi;
3629 
3630   // Shuffle the current and previous vector and update the vector parts.
3631   for (unsigned Part = 0; Part < UF; ++Part) {
3632     Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
3633     Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
3634     auto *Shuffle =
3635         VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart,
3636                                              ConstantVector::get(ShuffleMask))
3637                : Incoming;
3638     PhiPart->replaceAllUsesWith(Shuffle);
3639     cast<Instruction>(PhiPart)->eraseFromParent();
3640     VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
3641     Incoming = PreviousPart;
3642   }
3643 
3644   // Fix the latch value of the new recurrence in the vector loop.
3645   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3646 
3647   // Extract the last vector element in the middle block. This will be the
3648   // initial value for the recurrence when jumping to the scalar loop.
3649   auto *ExtractForScalar = Incoming;
3650   if (VF > 1) {
3651     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3652     ExtractForScalar = Builder.CreateExtractElement(
3653         ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
3654   }
3655   // Extract the second-to-last element in the middle block if the
3656   // Phi is used outside the loop. We need to extract the phi itself
3657   // and not the last element (the phi update in the current iteration). This
3658   // will be the value used when jumping to the exit block from the
3659   // LoopMiddleBlock, in case the scalar loop is not run at all.
3660   Value *ExtractForPhiUsedOutsideLoop = nullptr;
3661   if (VF > 1)
3662     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3663         Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
3664   // When the loop is unrolled without vectorizing, initialize
3665   // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
3666   // value of `Incoming`. This is analogous to the vectorized case above:
3667   // extracting the second-to-last element when VF > 1.
3668   else if (UF > 1)
3669     ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
3670 
3671   // Fix the initial value of the original recurrence in the scalar loop.
3672   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3673   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3674   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3675     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3676     Start->addIncoming(Incoming, BB);
3677   }
3678 
3679   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3680   Phi->setName("scalar.recur");
3681 
3682   // Finally, fix users of the recurrence outside the loop. The users will need
3683   // either the last value of the scalar recurrence or the last value of the
3684   // vector recurrence we extracted in the middle block. Since the loop is in
3685   // LCSSA form, we just need to find all the phi nodes for the original scalar
3686   // recurrence in the exit block, and then add an edge for the middle block.
3687   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3688     if (LCSSAPhi.getIncomingValue(0) == Phi) {
3689       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3690     }
3691   }
3692 }
3693 
3694 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
3695   Constant *Zero = Builder.getInt32(0);
3696 
3697   // Get its reduction variable descriptor.
3698   assert(Legal->isReductionVariable(Phi) &&
3699          "Unable to find the reduction variable");
3700   RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
3701 
3702   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3703   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3704   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3705   RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3706     RdxDesc.getMinMaxRecurrenceKind();
3707   setDebugLocFromInst(Builder, ReductionStartValue);
3708 
3709   // We need to generate a reduction vector from the incoming scalar.
3710   // To do so, we need to generate the 'identity' vector and override
3711   // one of the elements with the incoming scalar reduction. We need
3712   // to do it in the vector-loop preheader.
3713   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3714 
3715   // This is the vector-clone of the value that leaves the loop.
3716   Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
3717 
3718   // Find the reduction identity value: zero for addition, or and xor;
3719   // one for multiplication; -1 (all ones) for and.
3720   Value *Identity;
3721   Value *VectorStart;
3722   if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3723       RK == RecurrenceDescriptor::RK_FloatMinMax) {
3724     // MinMax reductions have the start value as their identity.
3725     if (VF == 1) {
3726       VectorStart = Identity = ReductionStartValue;
3727     } else {
3728       VectorStart = Identity =
3729         Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3730     }
3731   } else {
3732     // Handle other reduction kinds:
3733     Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3734         RK, VecTy->getScalarType());
3735     if (VF == 1) {
3736       Identity = Iden;
3737       // This vector is the Identity vector where the first element is the
3738       // incoming scalar reduction.
3739       VectorStart = ReductionStartValue;
3740     } else {
3741       Identity = ConstantVector::getSplat(VF, Iden);
3742 
3743       // This vector is the Identity vector where the first element is the
3744       // incoming scalar reduction.
3745       VectorStart =
3746         Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3747     }
3748   }
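  // As an illustration, for an integer add reduction with VF == 4 and start
  // value %s, Identity is <0, 0, 0, 0> and VectorStart is <%s, 0, 0, 0>, so
  // the start value contributes to the result exactly once.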
3749 
3750   // Wrap flags are in general invalid after vectorization, clear them.
3751   clearReductionWrapFlags(RdxDesc);
3752 
3753   // Fix the vector-loop phi.
3754 
3755   // Reductions do not have to start at zero. They can start with
3756   // any loop-invariant value.
3757   BasicBlock *Latch = OrigLoop->getLoopLatch();
3758   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3759 
3760   for (unsigned Part = 0; Part < UF; ++Part) {
3761     Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
3762     Value *Val = getOrCreateVectorValue(LoopVal, Part);
3763     // Make sure to add the reduction start value only to the
3764     // first unroll part.
3765     Value *StartVal = (Part == 0) ? VectorStart : Identity;
3766     cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
3767     cast<PHINode>(VecRdxPhi)
3768       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3769   }
3770 
3771   // Before each round, move the insertion point right between
3772   // the PHIs and the values we are going to write.
3773   // This allows us to write both PHINodes and the extractelement
3774   // instructions.
3775   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3776 
3777   setDebugLocFromInst(Builder, LoopExitInst);
3778 
3779   // If the tail is folded by masking, the vector value that leaves the loop
3780   // should be a Select choosing between the vectorized LoopExitInst and the
3781   // vectorized Phi, rather than the LoopExitInst itself.
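  // For example, with tail folding the vectorized update is typically guarded
  // as
  //   %sel = select <VF x i1> %mask, <VF x iN> %rdx.next, <VF x iN> %rdx.phi
  // and it is %sel, not %rdx.next, that must be used as the exiting value.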
3782   if (Cost->foldTailByMasking()) {
3783     for (unsigned Part = 0; Part < UF; ++Part) {
3784       Value *VecLoopExitInst =
3785           VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3786       Value *Sel = nullptr;
3787       for (User *U : VecLoopExitInst->users()) {
3788         if (isa<SelectInst>(U)) {
3789           assert(!Sel && "Reduction exit feeding two selects");
3790           Sel = U;
3791         } else
3792           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
3793       }
3794       assert(Sel && "Reduction exit feeds no select");
3795       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel);
3796     }
3797   }
3798 
3799   // If the vector reduction can be performed in a smaller type, we truncate
3800   // then extend the loop exit value to enable InstCombine to evaluate the
3801   // entire expression in the smaller type.
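  // For example, if the phi is i32 but the reduction fits in i8, each part is
  // truncated to <VF x i8> and immediately re-extended to <VF x i32> here in
  // the loop; InstCombine can then shrink the intervening computation to i8,
  // and the parts are truncated again to <VF x i8> in the middle block below.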
3802   if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
3803     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3804     Builder.SetInsertPoint(
3805         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
3806     VectorParts RdxParts(UF);
3807     for (unsigned Part = 0; Part < UF; ++Part) {
3808       RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3809       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3810       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3811                                         : Builder.CreateZExt(Trunc, VecTy);
3812       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
3813            UI != RdxParts[Part]->user_end();)
3814         if (*UI != Trunc) {
3815           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
3816           RdxParts[Part] = Extnd;
3817         } else {
3818           ++UI;
3819         }
3820     }
3821     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3822     for (unsigned Part = 0; Part < UF; ++Part) {
3823       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3824       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
3825     }
3826   }
3827 
3828   // Reduce all of the unrolled parts into a single vector.
3829   Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
3830   unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
3831 
3832   // The middle block terminator has already been assigned a DebugLoc here (the
3833   // OrigLoop's single latch terminator). We want the whole middle block to
3834   // appear to execute on this line because: (a) it is all compiler generated,
3835   // (b) these instructions are always executed after evaluating the latch
3836   // conditional branch, and (c) other passes may add new predecessors which
3837   // terminate on this line. This is the easiest way to ensure we don't
3838   // accidentally cause an extra step back into the loop while debugging.
3839   setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
3840   for (unsigned Part = 1; Part < UF; ++Part) {
3841     Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3842     if (Op != Instruction::ICmp && Op != Instruction::FCmp)
3843       // Floating point operations had to be 'fast' to enable the reduction.
3844       ReducedPartRdx = addFastMathFlag(
3845           Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart,
3846                               ReducedPartRdx, "bin.rdx"),
3847           RdxDesc.getFastMathFlags());
3848     else
3849       ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx,
3850                                       RdxPart);
3851   }
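  // For instance, with UF == 2 an integer add reduction emits
  //   %bin.rdx = add <VF x i32> %rdx.part1, %rdx.part0
  // and, when VF > 1, the combined vector is then reduced to a single scalar
  // by createTargetReduction below.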
3852 
3853   if (VF > 1) {
3854     bool NoNaN = Legal->hasFunNoNaNAttr();
3855     ReducedPartRdx =
3856         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN);
3857     // If the reduction can be performed in a smaller type, we need to extend
3858     // the reduction to the wider type before we branch to the original loop.
3859     if (Phi->getType() != RdxDesc.getRecurrenceType())
3860       ReducedPartRdx =
3861         RdxDesc.isSigned()
3862         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
3863         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
3864   }
3865 
3866   // Create a phi node that merges control-flow from the backedge-taken check
3867   // block and the middle block.
3868   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
3869                                         LoopScalarPreHeader->getTerminator());
3870   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
3871     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
3872   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
3873 
3874   // Now, we need to fix the users of the reduction variable
3875   // inside and outside of the scalar remainder loop.
3876   // We know that the loop is in LCSSA form. We need to update the
3877   // PHI nodes in the exit blocks.
3878   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3879     // All PHINodes need to have a single entry edge, or two if
3880     // we already fixed them.
3881     assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
3882 
3883     // We found a reduction value exit-PHI. Update it with the
3884     // incoming bypass edge.
3885     if (LCSSAPhi.getIncomingValue(0) == LoopExitInst)
3886       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
3887   } // end of the LCSSA phi scan.
3888 
3889   // Fix the scalar loop reduction variable with the incoming reduction sum
3890   // from the vector body and from the backedge value.
3891   int IncomingEdgeBlockIdx =
3892     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
3893   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
3894   // Pick the other block.
3895   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
3896   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
3897   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
3898 }
3899 
3900 void InnerLoopVectorizer::clearReductionWrapFlags(
3901     RecurrenceDescriptor &RdxDesc) {
3902   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3903   if (RK != RecurrenceDescriptor::RK_IntegerAdd &&
3904       RK != RecurrenceDescriptor::RK_IntegerMult)
3905     return;
3906 
3907   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
3908   assert(LoopExitInstr && "null loop exit instruction");
3909   SmallVector<Instruction *, 8> Worklist;
3910   SmallPtrSet<Instruction *, 8> Visited;
3911   Worklist.push_back(LoopExitInstr);
3912   Visited.insert(LoopExitInstr);
3913 
3914   while (!Worklist.empty()) {
3915     Instruction *Cur = Worklist.pop_back_val();
3916     if (isa<OverflowingBinaryOperator>(Cur))
3917       for (unsigned Part = 0; Part < UF; ++Part) {
3918         Value *V = getOrCreateVectorValue(Cur, Part);
3919         cast<Instruction>(V)->dropPoisonGeneratingFlags();
3920       }
3921 
3922     for (User *U : Cur->users()) {
3923       Instruction *UI = cast<Instruction>(U);
3924       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
3925           Visited.insert(UI).second)
3926         Worklist.push_back(UI);
3927     }
3928   }
3929 }
3930 
3931 void InnerLoopVectorizer::fixLCSSAPHIs() {
3932   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3933     if (LCSSAPhi.getNumIncomingValues() == 1) {
3934       auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
3935       // Non-instruction incoming values only have a single (lane-zero) value.
3936       unsigned LastLane = 0;
3937       if (isa<Instruction>(IncomingValue))
3938           LastLane = Cost->isUniformAfterVectorization(
3939                          cast<Instruction>(IncomingValue), VF)
3940                          ? 0
3941                          : VF - 1;
3942       // Can be a loop invariant incoming value or the last scalar value to be
3943       // extracted from the vectorized loop.
3944       Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3945       Value *lastIncomingValue =
3946           getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane });
3947       LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
3948     }
3949   }
3950 }
3951 
3952 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
3953   // The basic block and loop containing the predicated instruction.
3954   auto *PredBB = PredInst->getParent();
3955   auto *VectorLoop = LI->getLoopFor(PredBB);
3956 
3957   // Initialize a worklist with the operands of the predicated instruction.
3958   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
3959 
3960   // Holds instructions that we need to analyze again. An instruction may be
3961   // reanalyzed if we don't yet know if we can sink it or not.
3962   SmallVector<Instruction *, 8> InstsToReanalyze;
3963 
3964   // Returns true if a given use occurs in the predicated block. Phi nodes use
3965   // their operands in their corresponding predecessor blocks.
3966   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
3967     auto *I = cast<Instruction>(U.getUser());
3968     BasicBlock *BB = I->getParent();
3969     if (auto *Phi = dyn_cast<PHINode>(I))
3970       BB = Phi->getIncomingBlock(
3971           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
3972     return BB == PredBB;
3973   };
3974 
3975   // Iteratively sink the scalarized operands of the predicated instruction
3976   // into the block we created for it. When an instruction is sunk, its
3977   // operands are then added to the worklist. The algorithm ends after one pass
3978   // through the worklist doesn't sink a single instruction.
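  // For example, if a predicated sdiv has been scalarized into PredBB, an add
  // that feeds only that sdiv can be sunk into PredBB; once the add moves, the
  // instructions feeding the add may become sinkable too, which is why we
  // iterate until no further instruction is sunk.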
3979   bool Changed;
3980   do {
3981     // Add the instructions that need to be reanalyzed to the worklist, and
3982     // reset the changed indicator.
3983     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
3984     InstsToReanalyze.clear();
3985     Changed = false;
3986 
3987     while (!Worklist.empty()) {
3988       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
3989 
3990       // We can't sink an instruction if it is a phi node, is already in the
3991       // predicated block, is not in the loop, or may have side effects.
3992       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
3993           !VectorLoop->contains(I) || I->mayHaveSideEffects())
3994         continue;
3995 
3996       // It's legal to sink the instruction if all its uses occur in the
3997       // predicated block. Otherwise, there's nothing to do yet, and we may
3998       // need to reanalyze the instruction.
3999       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4000         InstsToReanalyze.push_back(I);
4001         continue;
4002       }
4003 
4004       // Move the instruction to the beginning of the predicated block, and add
4005       // its operands to the worklist.
4006       I->moveBefore(&*PredBB->getFirstInsertionPt());
4007       Worklist.insert(I->op_begin(), I->op_end());
4008 
4009       // The sinking may have enabled other instructions to be sunk, so we will
4010       // need to iterate.
4011       Changed = true;
4012     }
4013   } while (Changed);
4014 }
4015 
4016 void InnerLoopVectorizer::fixNonInductionPHIs() {
4017   for (PHINode *OrigPhi : OrigPHIsToFix) {
4018     PHINode *NewPhi =
4019         cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0));
4020     unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();
4021 
4022     SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
4023         predecessors(OrigPhi->getParent()));
4024     SmallVector<BasicBlock *, 2> VectorBBPredecessors(
4025         predecessors(NewPhi->getParent()));
4026     assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
4027            "Scalar and Vector BB should have the same number of predecessors");
4028 
4029     // The insertion point in Builder may be invalidated by the time we get
4030     // here. Force the Builder insertion point to something valid so that we do
4031     // not run into issues during insertion point restore in
4032     // getOrCreateVectorValue calls below.
4033     Builder.SetInsertPoint(NewPhi);
4034 
4035     // The predecessor order is preserved and we can rely on mapping between
4036     // scalar and vector block predecessors.
4037     for (unsigned i = 0; i < NumIncomingValues; ++i) {
4038       BasicBlock *NewPredBB = VectorBBPredecessors[i];
4039 
4040       // When looking up the new scalar/vector values to fix up, use incoming
4041       // values from original phi.
4042       Value *ScIncV =
4043           OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]);
4044 
4045       // The scalar incoming value may need a broadcast.
4046       Value *NewIncV = getOrCreateVectorValue(ScIncV, 0);
4047       NewPhi->addIncoming(NewIncV, NewPredBB);
4048     }
4049   }
4050 }
4051 
4052 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, unsigned UF,
4053                                    unsigned VF, bool IsPtrLoopInvariant,
4054                                    SmallBitVector &IsIndexLoopInvariant) {
4055   // Construct a vector GEP by widening the operands of the scalar GEP as
4056   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4057   // results in a vector of pointers when at least one operand of the GEP
4058   // is vector-typed. Thus, to keep the representation compact, we only use
4059   // vector-typed operands for loop-varying values.
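  // For example, a scalar 'getelementptr inbounds i32, i32* %base, i64 %idx'
  // with a loop-varying %idx is widened (for VF == 4) into a GEP whose index
  // operand is a <4 x i64> vector, yielding a <4 x i32*> result, while the
  // loop-invariant %base pointer operand is kept scalar.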
4060 
4061   if (VF > 1 && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4062     // If we are vectorizing, but the GEP has only loop-invariant operands,
4063     // the GEP we build (by only using vector-typed operands for
4064     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4065     // produce a vector of pointers, we need to either arbitrarily pick an
4066     // operand to broadcast, or broadcast a clone of the original GEP.
4067     // Here, we broadcast a clone of the original.
4068     //
4069     // TODO: If at some point we decide to scalarize instructions having
4070     //       loop-invariant operands, this special case will no longer be
4071     //       required. We would add the scalarization decision to
4072     //       collectLoopScalars() and teach getVectorValue() to broadcast
4073     //       the lane-zero scalar value.
4074     auto *Clone = Builder.Insert(GEP->clone());
4075     for (unsigned Part = 0; Part < UF; ++Part) {
4076       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4077       VectorLoopValueMap.setVectorValue(GEP, Part, EntryPart);
4078       addMetadata(EntryPart, GEP);
4079     }
4080   } else {
4081     // If the GEP has at least one loop-varying operand, we are sure to
4082     // produce a vector of pointers. But if we are only unrolling, we want
4083     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4084     // produce with the code below will be scalar (if VF == 1) or vector
4085     // (otherwise). Note that for the unroll-only case, we still maintain
4086     // values in the vector mapping with initVector, as we do for other
4087     // instructions.
4088     for (unsigned Part = 0; Part < UF; ++Part) {
4089       // The pointer operand of the new GEP. If it's loop-invariant, we
4090       // won't broadcast it.
4091       auto *Ptr = IsPtrLoopInvariant
4092                       ? GEP->getPointerOperand()
4093                       : getOrCreateVectorValue(GEP->getPointerOperand(), Part);
4094 
4095       // Collect all the indices for the new GEP. If any index is
4096       // loop-invariant, we won't broadcast it.
4097       SmallVector<Value *, 4> Indices;
4098       for (auto Index : enumerate(GEP->indices())) {
4099         Value *User = Index.value().get();
4100         if (IsIndexLoopInvariant[Index.index()])
4101           Indices.push_back(User);
4102         else
4103           Indices.push_back(getOrCreateVectorValue(User, Part));
4104       }
4105 
4106       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4107       // but it should be a vector, otherwise.
4108       auto *NewGEP =
4109           GEP->isInBounds()
4110               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4111                                           Indices)
4112               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4113       assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
4114              "NewGEP is not a pointer vector");
4115       VectorLoopValueMap.setVectorValue(GEP, Part, NewGEP);
4116       addMetadata(NewGEP, GEP);
4117     }
4118   }
4119 }
4120 
4121 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
4122                                               unsigned VF) {
4123   PHINode *P = cast<PHINode>(PN);
4124   if (EnableVPlanNativePath) {
4125     // Currently we enter here in the VPlan-native path for non-induction
4126     // PHIs where all control flow is uniform. We simply widen these PHIs.
4127     // Create a vector phi with no operands - the vector phi operands will be
4128     // set at the end of vector code generation.
4129     Type *VecTy =
4130         (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4131     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4132     VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
4133     OrigPHIsToFix.push_back(P);
4134 
4135     return;
4136   }
4137 
4138   assert(PN->getParent() == OrigLoop->getHeader() &&
4139          "Non-header phis should have been handled elsewhere");
4140 
4141   // In order to support recurrences we need to be able to vectorize Phi nodes.
4142   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4143   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4144   // this value when we vectorize all of the instructions that use the PHI.
4145   if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4146     for (unsigned Part = 0; Part < UF; ++Part) {
4147       // This is phase one of vectorizing PHIs.
4148       Type *VecTy =
4149           (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4150       Value *EntryPart = PHINode::Create(
4151           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4152       VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
4153     }
4154     return;
4155   }
4156 
4157   setDebugLocFromInst(Builder, P);
4158 
4159   // This PHINode must be an induction variable.
4160   // Make sure that we know about it.
4161   assert(Legal->getInductionVars()->count(P) && "Not an induction variable");
4162 
4163   InductionDescriptor II = Legal->getInductionVars()->lookup(P);
4164   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4165 
4166   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4167   // which can be found from the original scalar operations.
4168   switch (II.getKind()) {
4169   case InductionDescriptor::IK_NoInduction:
4170     llvm_unreachable("Unknown induction");
4171   case InductionDescriptor::IK_IntInduction:
4172   case InductionDescriptor::IK_FpInduction:
4173     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4174   case InductionDescriptor::IK_PtrInduction: {
4175     // Handle the pointer induction variable case.
4176     assert(P->getType()->isPointerTy() && "Unexpected type.");
4177     // This is the normalized GEP that starts counting at zero.
4178     Value *PtrInd = Induction;
4179     PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
4180     // Determine the number of scalars we need to generate for each unroll
4181     // iteration. If the instruction is uniform, we only need to generate the
4182     // first lane. Otherwise, we generate all VF values.
4183     unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
4184     // These are the scalar results. Notice that we don't generate vector GEPs
4185     // because scalar GEPs result in better code.
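    // For example, with VF == 4 and UF == 2 a non-uniform pointer induction
    // produces eight scalar "next.gep" values, one per (part, lane) pair, each
    // derived from the normalized index PtrInd plus Lane + Part * VF.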
4186     for (unsigned Part = 0; Part < UF; ++Part) {
4187       for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4188         Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
4189         Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4190         Value *SclrGep =
4191             emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4192         SclrGep->setName("next.gep");
4193         VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
4194       }
4195     }
4196     return;
4197   }
4198   }
4199 }
4200 
4201 /// A helper function for checking whether an integer division-related
4202 /// instruction may divide by zero (in which case it must be predicated if
4203 /// executed conditionally in the scalar code).
4204 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4205 /// Non-zero divisors that are not compile-time constants will not be
4206 /// converted into multiplications, so we will still end up scalarizing
4207 /// the division, but can do so without predication.
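/// For example, 'udiv i32 %x, 7' can never divide by zero and needs no
/// predication, whereas 'udiv i32 %x, %y' with a non-constant (or literally
/// zero) divisor must be predicated when executed conditionally.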
4208 static bool mayDivideByZero(Instruction &I) {
4209   assert((I.getOpcode() == Instruction::UDiv ||
4210           I.getOpcode() == Instruction::SDiv ||
4211           I.getOpcode() == Instruction::URem ||
4212           I.getOpcode() == Instruction::SRem) &&
4213          "Unexpected instruction");
4214   Value *Divisor = I.getOperand(1);
4215   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4216   return !CInt || CInt->isZero();
4217 }
4218 
4219 void InnerLoopVectorizer::widenInstruction(Instruction &I) {
4220   switch (I.getOpcode()) {
4221   case Instruction::Br:
4222   case Instruction::PHI:
4223   case Instruction::GetElementPtr:
4224     llvm_unreachable("This instruction is handled by a different recipe.");
4225   case Instruction::UDiv:
4226   case Instruction::SDiv:
4227   case Instruction::SRem:
4228   case Instruction::URem:
4229   case Instruction::Add:
4230   case Instruction::FAdd:
4231   case Instruction::Sub:
4232   case Instruction::FSub:
4233   case Instruction::FNeg:
4234   case Instruction::Mul:
4235   case Instruction::FMul:
4236   case Instruction::FDiv:
4237   case Instruction::FRem:
4238   case Instruction::Shl:
4239   case Instruction::LShr:
4240   case Instruction::AShr:
4241   case Instruction::And:
4242   case Instruction::Or:
4243   case Instruction::Xor: {
4244     // Just widen unops and binops.
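    // For example, per unroll part, 'add nsw i32 %a, %b' becomes
    // 'add nsw <VF x i32> %a.vec, %b.vec'; copyIRFlags below transfers flags
    // such as nsw/nuw and fast-math flags from the scalar instruction to the
    // widened one.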
4245     setDebugLocFromInst(Builder, &I);
4246 
4247     for (unsigned Part = 0; Part < UF; ++Part) {
4248       SmallVector<Value *, 2> Ops;
4249       for (Value *Op : I.operands())
4250         Ops.push_back(getOrCreateVectorValue(Op, Part));
4251 
4252       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4253 
4254       if (auto *VecOp = dyn_cast<Instruction>(V))
4255         VecOp->copyIRFlags(&I);
4256 
4257       // Use this vector value for all users of the original instruction.
4258       VectorLoopValueMap.setVectorValue(&I, Part, V);
4259       addMetadata(V, &I);
4260     }
4261 
4262     break;
4263   }
4264   case Instruction::Select: {
4265     // Widen selects.
4266     // If the selector is loop invariant we can create a select
4267     // instruction with a scalar condition. Otherwise, use vector-select.
4268     auto *SE = PSE.getSE();
4269     bool InvariantCond =
4270         SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
4271     setDebugLocFromInst(Builder, &I);
4272 
4273     // The condition can be loop invariant but still defined inside the
4274     // loop. This means that we can't just use the original 'cond' value.
4275     // We have to take the 'vectorized' value and pick the first lane.
4276     // Instcombine will make this a no-op.
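    // For example, 'select i1 %c, i32 %a, i32 %b' with a loop-varying %c
    // becomes, per part, 'select <VF x i1> %c.vec, <VF x i32> %a.vec,
    // <VF x i32> %b.vec'; if %c is loop invariant, the scalar lane-zero value
    // of %c is used instead to choose between the two widened operands.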
4277 
4278     auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0});
4279 
4280     for (unsigned Part = 0; Part < UF; ++Part) {
4281       Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part);
4282       Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part);
4283       Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part);
4284       Value *Sel =
4285           Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1);
4286       VectorLoopValueMap.setVectorValue(&I, Part, Sel);
4287       addMetadata(Sel, &I);
4288     }
4289 
4290     break;
4291   }
4292 
4293   case Instruction::ICmp:
4294   case Instruction::FCmp: {
4295     // Widen compares. Generate vector compares.
4296     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4297     auto *Cmp = cast<CmpInst>(&I);
4298     setDebugLocFromInst(Builder, Cmp);
4299     for (unsigned Part = 0; Part < UF; ++Part) {
4300       Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part);
4301       Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part);
4302       Value *C = nullptr;
4303       if (FCmp) {
4304         // Propagate fast math flags.
4305         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4306         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4307         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4308       } else {
4309         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4310       }
4311       VectorLoopValueMap.setVectorValue(&I, Part, C);
4312       addMetadata(C, &I);
4313     }
4314 
4315     break;
4316   }
4317 
4318   case Instruction::ZExt:
4319   case Instruction::SExt:
4320   case Instruction::FPToUI:
4321   case Instruction::FPToSI:
4322   case Instruction::FPExt:
4323   case Instruction::PtrToInt:
4324   case Instruction::IntToPtr:
4325   case Instruction::SIToFP:
4326   case Instruction::UIToFP:
4327   case Instruction::Trunc:
4328   case Instruction::FPTrunc:
4329   case Instruction::BitCast: {
4330     auto *CI = cast<CastInst>(&I);
4331     setDebugLocFromInst(Builder, CI);
4332 
4333     /// Vectorize casts.
4334     Type *DestTy =
4335         (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
4336 
4337     for (unsigned Part = 0; Part < UF; ++Part) {
4338       Value *A = getOrCreateVectorValue(CI->getOperand(0), Part);
4339       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4340       VectorLoopValueMap.setVectorValue(&I, Part, Cast);
4341       addMetadata(Cast, &I);
4342     }
4343     break;
4344   }
4345 
4346   case Instruction::Call: {
4347     // Ignore dbg intrinsics.
4348     if (isa<DbgInfoIntrinsic>(I))
4349       break;
4350     setDebugLocFromInst(Builder, &I);
4351 
4352     Module *M = I.getParent()->getParent()->getParent();
4353     auto *CI = cast<CallInst>(&I);
4354 
4355     SmallVector<Type *, 4> Tys;
4356     for (Value *ArgOperand : CI->arg_operands())
4357       Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
4358 
4359     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4360 
4361     // The flag indicates whether we use an intrinsic or an ordinary call for
4362     // the vectorized version of the instruction, i.e., whether the intrinsic
4363     // call is more beneficial than a library call.
4364     bool NeedToScalarize;
4365     unsigned CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4366     bool UseVectorIntrinsic =
4367         ID && Cost->getVectorIntrinsicCost(CI, VF) <= CallCost;
4368     assert((UseVectorIntrinsic || !NeedToScalarize) &&
4369            "Instruction should be scalarized elsewhere.");
4370 
4371     for (unsigned Part = 0; Part < UF; ++Part) {
4372       SmallVector<Value *, 4> Args;
4373       for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
4374         Value *Arg = CI->getArgOperand(i);
4375         // Some intrinsics have a scalar argument - don't replace it with a
4376         // vector.
4377         if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i))
4378           Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part);
4379         Args.push_back(Arg);
4380       }
4381 
4382       Function *VectorF;
4383       if (UseVectorIntrinsic) {
4384         // Use vector version of the intrinsic.
4385         Type *TysForDecl[] = {CI->getType()};
4386         if (VF > 1)
4387           TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4388         VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4389       } else {
4390         // Use vector version of the function call.
4391         const VFShape Shape =
4392             VFShape::get(*CI, {VF, false} /*EC*/, false /*HasGlobalPred*/);
4393 #ifndef NDEBUG
4394         const SmallVector<VFInfo, 8> Infos = VFDatabase::getMappings(*CI);
4395         assert(std::find_if(Infos.begin(), Infos.end(),
4396                             [&Shape](const VFInfo &Info) {
4397                               return Info.Shape == Shape;
4398                             }) != Infos.end() &&
4399                "Vector function shape is missing from the database.");
4400 #endif
4401         VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
4402       }
4403       assert(VectorF && "Can't create vector function.");
4404 
4405       SmallVector<OperandBundleDef, 1> OpBundles;
4406       CI->getOperandBundlesAsDefs(OpBundles);
4407       CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4408 
4409       if (isa<FPMathOperator>(V))
4410         V->copyFastMathFlags(CI);
4411 
4412       VectorLoopValueMap.setVectorValue(&I, Part, V);
4413       addMetadata(V, &I);
4414     }
4415 
4416     break;
4417   }
4418 
4419   default:
4420     // This instruction is not vectorized by simple widening.
4421     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4422     llvm_unreachable("Unhandled instruction!");
4423   } // end of switch.
4424 }
4425 
4426 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
4427   // We should not collect Scalars more than once per VF. Right now, this
4428   // function is called from collectUniformsAndScalars(), which already does
4429   // this check. Collecting Scalars for VF=1 does not make any sense.
4430   assert(VF >= 2 && Scalars.find(VF) == Scalars.end() &&
4431          "This function should not be visited twice for the same VF");
4432 
4433   SmallSetVector<Instruction *, 8> Worklist;
4434 
4435   // These sets are used to seed the analysis with pointers used by memory
4436   // accesses that will remain scalar.
4437   SmallSetVector<Instruction *, 8> ScalarPtrs;
4438   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4439 
4440   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4441   // The pointer operands of loads and stores will be scalar as long as the
4442   // memory access is not a gather or scatter operation. The value operand of a
4443   // store will remain scalar if the store is scalarized.
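  // For instance, a store widened as a consecutive vector store has a scalar
  // use of its pointer operand but not of its value operand (the stored value
  // becomes a vector), while a scalarized store keeps both uses scalar.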
4444   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4445     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4446     assert(WideningDecision != CM_Unknown &&
4447            "Widening decision should be ready at this moment");
4448     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4449       if (Ptr == Store->getValueOperand())
4450         return WideningDecision == CM_Scalarize;
4451     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4452            "Ptr is neither a value nor a pointer operand");
4453     return WideningDecision != CM_GatherScatter;
4454   };
4455 
4456   // A helper that returns true if the given value is a bitcast or
4457   // getelementptr instruction contained in the loop.
4458   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4459     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4460             isa<GetElementPtrInst>(V)) &&
4461            !TheLoop->isLoopInvariant(V);
4462   };
4463 
4464   // A helper that evaluates a memory access's use of a pointer. If the use
4465   // will be a scalar use, and the pointer is only used by memory accesses, we
4466   // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4467   // PossibleNonScalarPtrs.
4468   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4469     // We only care about bitcast and getelementptr instructions contained in
4470     // the loop.
4471     if (!isLoopVaryingBitCastOrGEP(Ptr))
4472       return;
4473 
4474     // If the pointer has already been identified as scalar (e.g., if it was
4475     // also identified as uniform), there's nothing to do.
4476     auto *I = cast<Instruction>(Ptr);
4477     if (Worklist.count(I))
4478       return;
4479 
4480     // If the use of the pointer will be a scalar use, and all users of the
4481     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4482     // place the pointer in PossibleNonScalarPtrs.
4483     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4484           return isa<LoadInst>(U) || isa<StoreInst>(U);
4485         }))
4486       ScalarPtrs.insert(I);
4487     else
4488       PossibleNonScalarPtrs.insert(I);
4489   };
4490 
4491   // We seed the scalars analysis with three classes of instructions: (1)
4492   // instructions marked uniform-after-vectorization, (2) bitcast and
4493   // getelementptr instructions used by memory accesses requiring a scalar use,
4494   // and (3) pointer induction variables and their update instructions (we
4495   // currently only scalarize these).
4496   //
4497   // (1) Add to the worklist all instructions that have been identified as
4498   // uniform-after-vectorization.
4499   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4500 
4501   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4502   // memory accesses requiring a scalar use. The pointer operands of loads and
4503   // stores will be scalar as long as the memory access is not a gather or
4504   // scatter operation. The value operand of a store will remain scalar if the
4505   // store is scalarized.
4506   for (auto *BB : TheLoop->blocks())
4507     for (auto &I : *BB) {
4508       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4509         evaluatePtrUse(Load, Load->getPointerOperand());
4510       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4511         evaluatePtrUse(Store, Store->getPointerOperand());
4512         evaluatePtrUse(Store, Store->getValueOperand());
4513       }
4514     }
4515   for (auto *I : ScalarPtrs)
4516     if (PossibleNonScalarPtrs.find(I) == PossibleNonScalarPtrs.end()) {
4517       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4518       Worklist.insert(I);
4519     }
4520 
4521   // (3) Add to the worklist all pointer induction variables and their update
4522   // instructions.
4523   //
4524   // TODO: Once we are able to vectorize pointer induction variables we should
4525   //       no longer insert them into the worklist here.
4526   auto *Latch = TheLoop->getLoopLatch();
4527   for (auto &Induction : *Legal->getInductionVars()) {
4528     auto *Ind = Induction.first;
4529     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4530     if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction)
4531       continue;
4532     Worklist.insert(Ind);
4533     Worklist.insert(IndUpdate);
4534     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4535     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4536                       << "\n");
4537   }
4538 
4539   // Insert the forced scalars.
4540   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4541   // induction variable when the PHI user is scalarized.
4542   auto ForcedScalar = ForcedScalars.find(VF);
4543   if (ForcedScalar != ForcedScalars.end())
4544     for (auto *I : ForcedScalar->second)
4545       Worklist.insert(I);
4546 
4547   // Expand the worklist by looking through any bitcasts and getelementptr
4548   // instructions we've already identified as scalar. This is similar to the
4549   // expansion step in collectLoopUniforms(); however, here we're only
4550   // expanding to include additional bitcasts and getelementptr instructions.
4551   unsigned Idx = 0;
4552   while (Idx != Worklist.size()) {
4553     Instruction *Dst = Worklist[Idx++];
4554     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4555       continue;
4556     auto *Src = cast<Instruction>(Dst->getOperand(0));
4557     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4558           auto *J = cast<Instruction>(U);
4559           return !TheLoop->contains(J) || Worklist.count(J) ||
4560                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4561                   isScalarUse(J, Src));
4562         })) {
4563       Worklist.insert(Src);
4564       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4565     }
4566   }
4567 
4568   // An induction variable will remain scalar if all users of the induction
4569   // variable and induction variable update remain scalar.
4570   for (auto &Induction : *Legal->getInductionVars()) {
4571     auto *Ind = Induction.first;
4572     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4573 
4574     // We already considered pointer induction variables, so there's no reason
4575     // to look at their users again.
4576     //
4577     // TODO: Once we are able to vectorize pointer induction variables we
4578     //       should no longer skip over them here.
4579     if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction)
4580       continue;
4581 
4582     // Determine if all users of the induction variable are scalar after
4583     // vectorization.
4584     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4585       auto *I = cast<Instruction>(U);
4586       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
4587     });
4588     if (!ScalarInd)
4589       continue;
4590 
4591     // Determine if all users of the induction variable update instruction are
4592     // scalar after vectorization.
4593     auto ScalarIndUpdate =
4594         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4595           auto *I = cast<Instruction>(U);
4596           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
4597         });
4598     if (!ScalarIndUpdate)
4599       continue;
4600 
4601     // The induction variable and its update instruction will remain scalar.
4602     Worklist.insert(Ind);
4603     Worklist.insert(IndUpdate);
4604     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4605     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4606                       << "\n");
4607   }
4608 
4609   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4610 }
4611 
4612 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, unsigned VF) {
4613   if (!blockNeedsPredication(I->getParent()))
4614     return false;
4615   switch(I->getOpcode()) {
4616   default:
4617     break;
4618   case Instruction::Load:
4619   case Instruction::Store: {
4620     if (!Legal->isMaskRequired(I))
4621       return false;
4622     auto *Ptr = getLoadStorePointerOperand(I);
4623     auto *Ty = getMemInstValueType(I);
4624     // We have already decided how to vectorize this instruction, get that
4625     // result.
4626     if (VF > 1) {
4627       InstWidening WideningDecision = getWideningDecision(I, VF);
4628       assert(WideningDecision != CM_Unknown &&
4629              "Widening decision should be ready at this moment");
4630       return WideningDecision == CM_Scalarize;
4631     }
4632     const MaybeAlign Alignment = getLoadStoreAlignment(I);
4633     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4634                                 isLegalMaskedGather(Ty, Alignment))
4635                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4636                                 isLegalMaskedScatter(Ty, Alignment));
4637   }
4638   case Instruction::UDiv:
4639   case Instruction::SDiv:
4640   case Instruction::SRem:
4641   case Instruction::URem:
4642     return mayDivideByZero(*I);
4643   }
4644   return false;
4645 }
4646 
4647 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I,
4648                                                                unsigned VF) {
4649   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4650   assert(getWideningDecision(I, VF) == CM_Unknown &&
4651          "Decision should not be set yet.");
4652   auto *Group = getInterleavedAccessGroup(I);
4653   assert(Group && "Must have a group.");
4654 
4655   // If the instruction's allocated size doesn't equal its type size, it
4656   // requires padding and will be scalarized.
4657   auto &DL = I->getModule()->getDataLayout();
4658   auto *ScalarTy = getMemInstValueType(I);
4659   if (hasIrregularType(ScalarTy, DL, VF))
4660     return false;
4661 
4662   // Check if masking is required.
4663   // A Group may need masking for one of two reasons: it resides in a block that
4664   // needs predication, or it was decided to use masking to deal with gaps.
4665   bool PredicatedAccessRequiresMasking =
4666       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
4667   bool AccessWithGapsRequiresMasking =
4668       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
4669   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
4670     return true;
4671 
4672   // If masked interleaving is required, we expect that the user/target had
4673   // enabled it, because otherwise it either wouldn't have been created or
4674   // it should have been invalidated by the CostModel.
4675   assert(useMaskedInterleavedAccesses(TTI) &&
4676          "Masked interleave-groups for predicated accesses are not enabled.");
4677 
4678   auto *Ty = getMemInstValueType(I);
4679   const MaybeAlign Alignment = getLoadStoreAlignment(I);
4680   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4681                           : TTI.isLegalMaskedStore(Ty, Alignment);
4682 }
4683 
4684 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I,
4685                                                                unsigned VF) {
4686   // Get and ensure we have a valid memory instruction.
4687   LoadInst *LI = dyn_cast<LoadInst>(I);
4688   StoreInst *SI = dyn_cast<StoreInst>(I);
4689   assert((LI || SI) && "Invalid memory instruction");
4690 
4691   auto *Ptr = getLoadStorePointerOperand(I);
4692 
4693   // In order to be widened, the pointer should be consecutive, first of all.
4694   if (!Legal->isConsecutivePtr(Ptr))
4695     return false;
4696 
4697   // If the instruction is a store located in a predicated block, it will be
4698   // scalarized.
4699   if (isScalarWithPredication(I))
4700     return false;
4701 
4702   // If the instruction's allocated size doesn't equal its type size, it
4703   // requires padding and will be scalarized.
4704   auto &DL = I->getModule()->getDataLayout();
4705   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
4706   if (hasIrregularType(ScalarTy, DL, VF))
4707     return false;
4708 
4709   return true;
4710 }
4711 
4712 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
4713   // We should not collect Uniforms more than once per VF. Right now,
4714   // this function is called from collectUniformsAndScalars(), which
4715   // already does this check. Collecting Uniforms for VF=1 does not make any
4716   // sense.
4717 
4718   assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() &&
4719          "This function should not be visited twice for the same VF");
4720 
4721   // Visit the list of Uniforms. If we do not find any uniform value, we will
4722   // not analyze it again; Uniforms.count(VF) will still return 1.
4723   Uniforms[VF].clear();
4724 
4725   // We now know that the loop is vectorizable!
4726   // Collect instructions inside the loop that will remain uniform after
4727   // vectorization.
4728 
4729   // Global values, params and instructions outside of current loop are out of
4730   // scope.
4731   auto isOutOfScope = [&](Value *V) -> bool {
4732     Instruction *I = dyn_cast<Instruction>(V);
4733     return (!I || !TheLoop->contains(I));
4734   };
4735 
4736   SetVector<Instruction *> Worklist;
4737   BasicBlock *Latch = TheLoop->getLoopLatch();
4738 
4739   // Instructions that are scalar with predication must not be considered
4740   // uniform after vectorization, because that would create an erroneous
4741   // replicating region where only a single instance out of VF should be formed.
4742   // TODO: optimize such seldom cases if found important, see PR40816.
4743   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
4744     if (isScalarWithPredication(I, VF)) {
4745       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
4746                         << *I << "\n");
4747       return;
4748     }
4749     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
4750     Worklist.insert(I);
4751   };
4752 
4753   // Start with the conditional branch. If the branch condition is an
4754   // instruction contained in the loop that is only used by the branch, it is
4755   // uniform.
4756   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4757   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
4758     addToWorklistIfAllowed(Cmp);
4759 
4760   // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
4761   // are pointers that are treated like consecutive pointers during
4762   // vectorization. The pointer operands of interleaved accesses are an
4763   // example.
4764   SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
4765 
4766   // Holds pointer operands of instructions that are possibly non-uniform.
4767   SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
4768 
4769   auto isUniformDecision = [&](Instruction *I, unsigned VF) {
4770     InstWidening WideningDecision = getWideningDecision(I, VF);
4771     assert(WideningDecision != CM_Unknown &&
4772            "Widening decision should be ready at this moment");
4773 
4774     return (WideningDecision == CM_Widen ||
4775             WideningDecision == CM_Widen_Reverse ||
4776             WideningDecision == CM_Interleave);
4777   };
4778   // Iterate over the instructions in the loop, and collect all
4779   // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
4780   // that a consecutive-like pointer operand will be scalarized, we collect it
4781   // in PossibleNonUniformPtrs instead. We use two sets here because a single
4782   // getelementptr instruction can be used by both vectorized and scalarized
4783   // memory instructions. For example, if a loop loads and stores from the same
4784   // location, but the store is conditional, the store will be scalarized, and
4785   // the getelementptr won't remain uniform.
4786   for (auto *BB : TheLoop->blocks())
4787     for (auto &I : *BB) {
4788       // If there's no pointer operand, there's nothing to do.
4789       auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
4790       if (!Ptr)
4791         continue;
4792 
4793       // True if all users of Ptr are memory accesses that have Ptr as their
4794       // pointer operand.
4795       auto UsersAreMemAccesses =
4796           llvm::all_of(Ptr->users(), [&](User *U) -> bool {
4797             return getLoadStorePointerOperand(U) == Ptr;
4798           });
4799 
4800       // Ensure the memory instruction will not be scalarized or used by
4801       // gather/scatter, making its pointer operand non-uniform. If the pointer
4802       // operand is used by any instruction other than a memory access, we
4803       // conservatively assume the pointer operand may be non-uniform.
4804       if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
4805         PossibleNonUniformPtrs.insert(Ptr);
4806 
4807       // If the memory instruction will be vectorized and its pointer operand
4808       // is consecutive-like or part of an interleaved access, the pointer
4809       // operand should remain uniform.
4810       else
4811         ConsecutiveLikePtrs.insert(Ptr);
4812     }
4813 
4814   // Add to the Worklist all consecutive and consecutive-like pointers that
4815   // aren't also identified as possibly non-uniform.
4816   for (auto *V : ConsecutiveLikePtrs)
4817     if (PossibleNonUniformPtrs.find(V) == PossibleNonUniformPtrs.end())
4818       addToWorklistIfAllowed(V);
4819 
4820   // Expand Worklist in topological order: whenever a new instruction
4821   // is added, its users should already be inside Worklist. This ensures
4822   // that a uniform instruction will only be used by uniform instructions.
4823   unsigned idx = 0;
4824   while (idx != Worklist.size()) {
4825     Instruction *I = Worklist[idx++];
4826 
4827     for (auto OV : I->operand_values()) {
4828       // isOutOfScope operands cannot be uniform instructions.
4829       if (isOutOfScope(OV))
4830         continue;
4831       // First-order recurrence PHIs should typically be considered
4832       // non-uniform.
4833       auto *OP = dyn_cast<PHINode>(OV);
4834       if (OP && Legal->isFirstOrderRecurrence(OP))
4835         continue;
4836       // If all the users of the operand are uniform, then add the
4837       // operand into the uniform worklist.
4838       auto *OI = cast<Instruction>(OV);
4839       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
4840             auto *J = cast<Instruction>(U);
4841             return Worklist.count(J) ||
4842                    (OI == getLoadStorePointerOperand(J) &&
4843                     isUniformDecision(J, VF));
4844           }))
4845         addToWorklistIfAllowed(OI);
4846     }
4847   }
4848 
4849   // Returns true if Ptr is the pointer operand of a memory access instruction
4850   // I, and I is known to not require scalarization.
4851   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
4852     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
4853   };
4854 
4855   // For an instruction to be added into Worklist above, all its users inside
4856   // the loop should also be in Worklist. However, this condition cannot be
4857   // true for phi nodes that form a cyclic dependence. We must process phi
4858   // nodes separately. An induction variable will remain uniform if all users
4859   // of the induction variable and induction variable update remain uniform.
4860   // The code below handles both pointer and non-pointer induction variables.
4861   for (auto &Induction : *Legal->getInductionVars()) {
4862     auto *Ind = Induction.first;
4863     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4864 
4865     // Determine if all users of the induction variable are uniform after
4866     // vectorization.
4867     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4868       auto *I = cast<Instruction>(U);
4869       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4870              isVectorizedMemAccessUse(I, Ind);
4871     });
4872     if (!UniformInd)
4873       continue;
4874 
4875     // Determine if all users of the induction variable update instruction are
4876     // uniform after vectorization.
4877     auto UniformIndUpdate =
4878         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4879           auto *I = cast<Instruction>(U);
4880           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4881                  isVectorizedMemAccessUse(I, IndUpdate);
4882         });
4883     if (!UniformIndUpdate)
4884       continue;
4885 
4886     // The induction variable and its update instruction will remain uniform.
4887     addToWorklistIfAllowed(Ind);
4888     addToWorklistIfAllowed(IndUpdate);
4889   }
4890 
4891   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
4892 }
4893 
4894 bool LoopVectorizationCostModel::runtimeChecksRequired() {
4895   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
4896 
4897   if (Legal->getRuntimePointerChecking()->Need) {
4898     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
4899         "runtime pointer checks needed. Enable vectorization of this "
4900         "loop with '#pragma clang loop vectorize(enable)' when "
4901         "compiling with -Os/-Oz",
4902         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4903     return true;
4904   }
4905 
4906   if (!PSE.getUnionPredicate().getPredicates().empty()) {
4907     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
4908         "runtime SCEV checks needed. Enable vectorization of this "
4909         "loop with '#pragma clang loop vectorize(enable)' when "
4910         "compiling with -Os/-Oz",
4911         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4912     return true;
4913   }
4914 
4915   // FIXME: Avoid specializing for stride==1 instead of bailing out.
4916   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
4917     reportVectorizationFailure("Runtime stride check is required with -Os/-Oz",
4918         "runtime stride == 1 checks needed. Enable vectorization of "
4919         "this loop with '#pragma clang loop vectorize(enable)' when "
4920         "compiling with -Os/-Oz",
4921         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4922     return true;
4923   }
4924 
4925   return false;
4926 }
4927 
4928 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF() {
4929   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
4930     // TODO: It may be useful to do this, since the check is still likely to be
4931     // dynamically uniform if the target can skip it.
4932     reportVectorizationFailure(
4933         "Not inserting runtime ptr check for divergent target",
4934         "runtime pointer checks needed. Not enabled for divergent target",
4935         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
4936     return None;
4937   }
4938 
4939   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
4940   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
4941   if (TC == 1) {
4942     reportVectorizationFailure("Single iteration (non) loop",
4943         "loop trip count is one, irrelevant for vectorization",
4944         "SingleIterationLoop", ORE, TheLoop);
4945     return None;
4946   }
4947 
4948   switch (ScalarEpilogueStatus) {
4949   case CM_ScalarEpilogueAllowed:
4950     return computeFeasibleMaxVF(TC);
4951   case CM_ScalarEpilogueNotNeededUsePredicate:
4952     LLVM_DEBUG(
4953         dbgs() << "LV: vector predicate hint/switch found.\n"
4954                << "LV: Not allowing scalar epilogue, creating predicated "
4955                << "vector loop.\n");
4956     break;
4957   case CM_ScalarEpilogueNotAllowedLowTripLoop:
4958     // fallthrough as a special case of OptForSize
4959   case CM_ScalarEpilogueNotAllowedOptSize:
4960     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
4961       LLVM_DEBUG(
4962           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
4963     else
4964       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
4965                         << "count.\n");
4966 
4967     // Bail if runtime checks are required, which are not good when optimising
4968     // for size.
4969     if (runtimeChecksRequired())
4970       return None;
4971     break;
4972   }
4973 
4974   // Now try tail folding.
4975 
4976   // Invalidate interleave groups that require an epilogue if we can't mask
4977   // the interleave-group.
4978   if (!useMaskedInterleavedAccesses(TTI))
4979     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
4980 
4981   unsigned MaxVF = computeFeasibleMaxVF(TC);
4982   if (TC > 0 && TC % MaxVF == 0) {
4983     // Accept MaxVF if we do not have a tail.
4984     LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
4985     return MaxVF;
4986   }
4987 
4988   // If we don't know the precise trip count, or if the trip count that we
4989   // found modulo the vectorization factor is not zero, try to fold the tail
4990   // by masking.
4991   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
4992   if (Legal->prepareToFoldTailByMasking()) {
4993     FoldTailByMasking = true;
4994     return MaxVF;
4995   }
4996 
4997   if (TC == 0) {
4998     reportVectorizationFailure(
4999         "Unable to calculate the loop count due to complex control flow",
5000         "unable to calculate the loop count due to complex control flow",
5001         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5002     return None;
5003   }
5004 
5005   reportVectorizationFailure(
5006       "Cannot optimize for size and vectorize at the same time.",
5007       "cannot optimize for size and vectorize at the same time. "
5008       "Enable vectorization of this loop with '#pragma clang loop "
5009       "vectorize(enable)' when compiling with -Os/-Oz",
5010       "NoTailLoopWithOptForSize", ORE, TheLoop);
5011   return None;
5012 }
5013 
5014 unsigned
5015 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount) {
5016   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5017   unsigned SmallestType, WidestType;
5018   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5019   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
5020 
5021   // Get the maximum safe dependence distance in bits computed by LAA.
5022   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
5023   // the memory access that is most restrictive (involved in the smallest
5024   // dependence distance).
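  // For example (illustrative numbers only): if the most restrictive dependence
  // involves i32 accesses and LAA determined MaxVF = 4, then
  // MaxSafeRegisterWidth = 4 * 4 * 8 = 128 bits.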
5025   unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();
5026 
5027   WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);
5028 
5029   unsigned MaxVectorSize = WidestRegister / WidestType;
5030 
5031   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5032                     << " / " << WidestType << " bits.\n");
5033   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5034                     << WidestRegister << " bits.\n");
5035 
5036   assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements"
5037                                  " into one vector!");
5038   if (MaxVectorSize == 0) {
5039     LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
5040     MaxVectorSize = 1;
5041     return MaxVectorSize;
5042   } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
5043              isPowerOf2_32(ConstTripCount)) {
5044     // We need to clamp the VF to be the ConstTripCount. There is no point in
5045     // choosing a higher viable VF as done in the loop below.
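    // For example (illustrative): with ConstTripCount = 4 and MaxVectorSize = 16,
    // a VF of 4 already covers the whole loop, so MaxVF is clamped to 4.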
5046     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5047                       << ConstTripCount << "\n");
5048     MaxVectorSize = ConstTripCount;
5049     return MaxVectorSize;
5050   }
5051 
5052   unsigned MaxVF = MaxVectorSize;
5053   if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
5054       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5055     // Collect all viable vectorization factors larger than the default MaxVF
5056     // (i.e. MaxVectorSize).
5057     SmallVector<unsigned, 8> VFs;
5058     unsigned NewMaxVectorSize = WidestRegister / SmallestType;
5059     for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
5060       VFs.push_back(VS);
5061 
5062     // For each VF calculate its register usage.
5063     auto RUs = calculateRegisterUsage(VFs);
5064 
5065     // Select the largest VF which doesn't require more registers than existing
5066     // ones.
5067     for (int i = RUs.size() - 1; i >= 0; --i) {
5068       bool Selected = true;
5069       for (auto& pair : RUs[i].MaxLocalUsers) {
5070         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5071         if (pair.second > TargetNumRegisters)
5072           Selected = false;
5073       }
5074       if (Selected) {
5075         MaxVF = VFs[i];
5076         break;
5077       }
5078     }
5079     if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
5080       if (MaxVF < MinVF) {
5081         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5082                           << ") with target's minimum: " << MinVF << '\n');
5083         MaxVF = MinVF;
5084       }
5085     }
5086   }
5087   return MaxVF;
5088 }
5089 
5090 VectorizationFactor
5091 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
5092   float Cost = expectedCost(1).first;
5093   const float ScalarCost = Cost;
5094   unsigned Width = 1;
5095   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
5096 
5097   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5098   if (ForceVectorization && MaxVF > 1) {
5099     // Ignore scalar width, because the user explicitly wants vectorization.
5100     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5101     // evaluation.
5102     Cost = std::numeric_limits<float>::max();
5103   }
5104 
5105   for (unsigned i = 2; i <= MaxVF; i *= 2) {
5106     // Notice that the vector loop needs to be executed fewer times, so
5107     // we need to divide the cost of the vector loop by the width of
5108     // the vector elements.
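    // For example (illustrative costs): if the scalar loop body costs 8 per
    // iteration and the VF = 4 body costs 20, the per-lane cost is 20 / 4 = 5,
    // which is cheaper than the scalar cost of 8, so VF = 4 would be preferred.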
5109     VectorizationCostTy C = expectedCost(i);
5110     float VectorCost = C.first / (float)i;
5111     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5112                       << " costs: " << (int)VectorCost << ".\n");
5113     if (!C.second && !ForceVectorization) {
5114       LLVM_DEBUG(
5115           dbgs() << "LV: Not considering vector loop of width " << i
5116                  << " because it will not generate any vector instructions.\n");
5117       continue;
5118     }
5119     if (VectorCost < Cost) {
5120       Cost = VectorCost;
5121       Width = i;
5122     }
5123   }
5124 
5125   if (!EnableCondStoresVectorization && NumPredStores) {
5126     reportVectorizationFailure("There are conditional stores.",
5127         "store that is conditionally executed prevents vectorization",
5128         "ConditionalStore", ORE, TheLoop);
5129     Width = 1;
5130     Cost = ScalarCost;
5131   }
5132 
5133   LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
5134              << "LV: Vectorization seems to be not beneficial, "
5135              << "but was forced by a user.\n");
5136   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5137   VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
5138   return Factor;
5139 }
5140 
5141 std::pair<unsigned, unsigned>
5142 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5143   unsigned MinWidth = -1U;
5144   unsigned MaxWidth = 8;
5145   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5146 
5147   // For each block.
5148   for (BasicBlock *BB : TheLoop->blocks()) {
5149     // For each instruction in the loop.
5150     for (Instruction &I : BB->instructionsWithoutDebug()) {
5151       Type *T = I.getType();
5152 
5153       // Skip ignored values.
5154       if (ValuesToIgnore.find(&I) != ValuesToIgnore.end())
5155         continue;
5156 
5157       // Only examine Loads, Stores and PHINodes.
5158       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5159         continue;
5160 
5161       // Examine PHI nodes that are reduction variables. Update the type to
5162       // account for the recurrence type.
5163       if (auto *PN = dyn_cast<PHINode>(&I)) {
5164         if (!Legal->isReductionVariable(PN))
5165           continue;
5166         RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
5167         T = RdxDesc.getRecurrenceType();
5168       }
5169 
5170       // Examine the stored values.
5171       if (auto *ST = dyn_cast<StoreInst>(&I))
5172         T = ST->getValueOperand()->getType();
5173 
5174       // Ignore loaded pointer types and stored pointer types that are not
5175       // vectorizable.
5176       //
5177       // FIXME: The check here attempts to predict whether a load or store will
5178       //        be vectorized. We only know this for certain after a VF has
5179       //        been selected. Here, we assume that if an access can be
5180       //        vectorized, it will be. We should also look at extending this
5181       //        optimization to non-pointer types.
5182       //
5183       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
5184           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
5185         continue;
5186 
5187       MinWidth = std::min(MinWidth,
5188                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5189       MaxWidth = std::max(MaxWidth,
5190                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5191     }
5192   }
5193 
5194   return {MinWidth, MaxWidth};
5195 }
5196 
5197 unsigned LoopVectorizationCostModel::selectInterleaveCount(unsigned VF,
5198                                                            unsigned LoopCost) {
5199   // -- The interleave heuristics --
5200   // We interleave the loop in order to expose ILP and reduce the loop overhead.
5201   // There are many micro-architectural considerations that we can't predict
5202   // at this level. For example, frontend pressure (on decode or fetch) due to
5203   // code size, or the number and capabilities of the execution ports.
5204   //
5205   // We use the following heuristics to select the interleave count:
5206   // 1. If the code has reductions, then we interleave to break the cross
5207   // iteration dependency.
5208   // 2. If the loop is really small, then we interleave to reduce the loop
5209   // overhead.
5210   // 3. We don't interleave if we think that we will spill registers to memory
5211   // due to the increased register pressure.
5212 
5213   if (!isScalarEpilogueAllowed())
5214     return 1;
5215 
5216   // The max safe dependence distance was already used to limit the VF, so do not interleave.
5217   if (Legal->getMaxSafeDepDistBytes() != -1U)
5218     return 1;
5219 
5220   // Do not interleave loops with a relatively small known or estimated trip
5221   // count.
5222   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
5223   if (BestKnownTC && *BestKnownTC < TinyTripCountInterleaveThreshold)
5224     return 1;
5225 
5226   RegisterUsage R = calculateRegisterUsage({VF})[0];
5227   // We divide by these constants so assume that we have at least one
5228   // instruction that uses at least one register.
5229   for (auto& pair : R.MaxLocalUsers) {
5230     pair.second = std::max(pair.second, 1U);
5231   }
5232 
5233   // We calculate the interleave count using the following formula.
5234   // Subtract the number of loop invariants from the number of available
5235   // registers. These registers are used by all of the interleaved instances.
5236   // Next, divide the remaining registers by the number of registers that is
5237   // required by the loop, in order to estimate how many parallel instances
5238   // fit without causing spills. All of this is rounded down if necessary to be
5239   // a power of two. We want a power-of-two interleave count to simplify any
5240   // addressing operations or alignment considerations.
5241   // We also want power of two interleave counts to ensure that the induction
5242   // variable of the vector loop wraps to zero, when tail is folded by masking;
5243   // this currently happens when OptForSize, in which case IC is set to 1 above.
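  // Illustrative example (hypothetical numbers): with 32 registers in a class,
  // 2 of them holding loop-invariant values and a max local usage of 6, the
  // candidate count is PowerOf2Floor((32 - 2) / 6) = PowerOf2Floor(5) = 4.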
5244   unsigned IC = UINT_MAX;
5245 
5246   for (auto& pair : R.MaxLocalUsers) {
5247     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5248     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
5249                       << " registers of "
5250                       << TTI.getRegisterClassName(pair.first) << " register class\n");
5251     if (VF == 1) {
5252       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5253         TargetNumRegisters = ForceTargetNumScalarRegs;
5254     } else {
5255       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5256         TargetNumRegisters = ForceTargetNumVectorRegs;
5257     }
5258     unsigned MaxLocalUsers = pair.second;
5259     unsigned LoopInvariantRegs = 0;
5260     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
5261       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
5262 
5263     unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
5264     // Don't count the induction variable as interleaved.
5265     if (EnableIndVarRegisterHeur) {
5266       TmpIC =
5267           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
5268                         std::max(1U, (MaxLocalUsers - 1)));
5269     }
5270 
5271     IC = std::min(IC, TmpIC);
5272   }
5273 
5274   // Clamp the interleave ranges to reasonable counts.
5275   unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
5276 
5277   // Check if the user has overridden the max.
5278   if (VF == 1) {
5279     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5280       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5281   } else {
5282     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5283       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5284   }
5285 
5286   // If the trip count is a known or estimated compile-time constant, limit the
5287   // interleave count to at most the trip count divided by VF.
5288   if (BestKnownTC) {
5289     MaxInterleaveCount = std::min(*BestKnownTC / VF, MaxInterleaveCount);
5290   }
5291 
5292   // If we did not calculate the cost for VF (because the user selected the VF)
5293   // then we calculate the cost of VF here.
5294   if (LoopCost == 0)
5295     LoopCost = expectedCost(VF).first;
5296 
5297   assert(LoopCost && "Non-zero loop cost expected");
5298 
5299   // Clamp the calculated IC to be between 1 and the max interleave count
5300   // that the target and trip count allow.
5301   if (IC > MaxInterleaveCount)
5302     IC = MaxInterleaveCount;
5303   else if (IC < 1)
5304     IC = 1;
5305 
5306   // Interleave if we vectorized this loop and there is a reduction that could
5307   // benefit from interleaving.
5308   if (VF > 1 && !Legal->getReductionVars()->empty()) {
5309     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5310     return IC;
5311   }
5312 
5313   // Note that if we've already vectorized the loop we will have done the
5314   // runtime check and so interleaving won't require further checks.
5315   bool InterleavingRequiresRuntimePointerCheck =
5316       (VF == 1 && Legal->getRuntimePointerChecking()->Need);
5317 
5318   // We want to interleave small loops in order to reduce the loop overhead and
5319   // potentially expose ILP opportunities.
5320   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
5321   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
5322     // We assume that the cost overhead is 1 and we use the cost model
5323     // to estimate the cost of the loop and interleave until the cost of the
5324     // loop overhead is about 5% of the cost of the loop.
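    // For example (illustrative, assuming the default SmallLoopCost of 20): a
    // loop body costing 6 yields SmallIC = min(IC, PowerOf2Floor(20 / 6)) =
    // min(IC, 2).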
5325     unsigned SmallIC =
5326         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
5327 
5328     // Interleave until store/load ports (estimated by max interleave count) are
5329     // saturated.
5330     unsigned NumStores = Legal->getNumStores();
5331     unsigned NumLoads = Legal->getNumLoads();
5332     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5333     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5334 
5335     // If we have a scalar reduction (vector reductions are already dealt with
5336     // by this point), we can increase the critical path length if the loop
5337     // we're interleaving is inside another loop. Limit, by default to 2, so the
5338     // critical path only gets increased by one reduction operation.
5339     if (!Legal->getReductionVars()->empty() && TheLoop->getLoopDepth() > 1) {
5340       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5341       SmallIC = std::min(SmallIC, F);
5342       StoresIC = std::min(StoresIC, F);
5343       LoadsIC = std::min(LoadsIC, F);
5344     }
5345 
5346     if (EnableLoadStoreRuntimeInterleave &&
5347         std::max(StoresIC, LoadsIC) > SmallIC) {
5348       LLVM_DEBUG(
5349           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5350       return std::max(StoresIC, LoadsIC);
5351     }
5352 
5353     LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5354     return SmallIC;
5355   }
5356 
5357   // Interleave if this is a large loop (small loops are already dealt with by
5358   // this point) that could benefit from interleaving.
5359   bool HasReductions = !Legal->getReductionVars()->empty();
5360   if (TTI.enableAggressiveInterleaving(HasReductions)) {
5361     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5362     return IC;
5363   }
5364 
5365   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
5366   return 1;
5367 }
5368 
5369 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5370 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
5371   // This function calculates the register usage by measuring the highest number
5372   // of values that are alive at a single location. Obviously, this is a very
5373   // rough estimation. We scan the loop in topological order and
5374   // assign a number to each instruction. We use RPO to ensure that defs are
5375   // met before their users. We assume that each instruction that has in-loop
5376   // users starts an interval. We record every time that an in-loop value is
5377   // used, so we have a list of the first and last occurrences of each
5378   // instruction. Next, we transpose this data structure into a multi map that
5379   // holds the list of intervals that *end* at a specific location. This multi
5380   // map allows us to perform a linear search. We scan the instructions linearly
5381   // and record each time that a new interval starts, by placing it in a set.
5382   // If we find this value in the multi-map then we remove it from the set.
5383   // The max register usage is the maximum size of the set.
5384   // We also search for instructions that are defined outside the loop, but are
5385   // used inside the loop. We need this number separately from the max-interval
5386   // usage number because when we unroll, loop-invariant values do not take
5387   // more registers.
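  // Illustrative sketch (hypothetical values): for a straight-line sequence
  // %a; %b = f(%a); %c = g(%a, %b), the intervals are roughly %a:[0,2] and
  // %b:[1,2]; at the definition of %c both %a and %b are still open, so the
  // maximum local usage is 2.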
5388   LoopBlocksDFS DFS(TheLoop);
5389   DFS.perform(LI);
5390 
5391   RegisterUsage RU;
5392 
5393   // Each 'key' in the map opens a new interval. The values
5394   // of the map are the index of the 'last seen' usage of the
5395   // instruction that is the key.
5396   using IntervalMap = DenseMap<Instruction *, unsigned>;
5397 
5398   // Maps instruction to its index.
5399   SmallVector<Instruction *, 64> IdxToInstr;
5400   // Marks the end of each interval.
5401   IntervalMap EndPoint;
5402   // Saves the set of instructions that are used in the loop.
5403   SmallPtrSet<Instruction *, 8> Ends;
5404   // Saves the list of values that are used in the loop but are
5405   // defined outside the loop, such as arguments and constants.
5406   SmallPtrSet<Value *, 8> LoopInvariants;
5407 
5408   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
5409     for (Instruction &I : BB->instructionsWithoutDebug()) {
5410       IdxToInstr.push_back(&I);
5411 
5412       // Save the end location of each USE.
5413       for (Value *U : I.operands()) {
5414         auto *Instr = dyn_cast<Instruction>(U);
5415 
5416         // Ignore non-instruction values such as arguments, constants, etc.
5417         if (!Instr)
5418           continue;
5419 
5420         // If this instruction is outside the loop then record it and continue.
5421         if (!TheLoop->contains(Instr)) {
5422           LoopInvariants.insert(Instr);
5423           continue;
5424         }
5425 
5426         // Overwrite previous end points.
5427         EndPoint[Instr] = IdxToInstr.size();
5428         Ends.insert(Instr);
5429       }
5430     }
5431   }
5432 
5433   // Saves the list of intervals that end with the index in 'key'.
5434   using InstrList = SmallVector<Instruction *, 2>;
5435   DenseMap<unsigned, InstrList> TransposeEnds;
5436 
5437   // Transpose the EndPoints to a list of values that end at each index.
5438   for (auto &Interval : EndPoint)
5439     TransposeEnds[Interval.second].push_back(Interval.first);
5440 
5441   SmallPtrSet<Instruction *, 8> OpenIntervals;
5442 
5443   // Get the size of the widest register.
5444   unsigned MaxSafeDepDist = -1U;
5445   if (Legal->getMaxSafeDepDistBytes() != -1U)
5446     MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
5447   unsigned WidestRegister =
5448       std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
5449   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5450 
5451   SmallVector<RegisterUsage, 8> RUs(VFs.size());
5452   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
5453 
5454   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
5455 
5456   // A lambda that gets the register usage for the given type and VF.
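  // For example (illustrative): an i32 value at VF = 8 on a target with
  // 128-bit vector registers needs max(1, 8 * 32 / 128) = 2 registers.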
5457   auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
5458     if (Ty->isTokenTy())
5459       return 0U;
5460     unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
5461     return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
5462   };
5463 
5464   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
5465     Instruction *I = IdxToInstr[i];
5466 
5467     // Remove all of the instructions that end at this location.
5468     InstrList &List = TransposeEnds[i];
5469     for (Instruction *ToRemove : List)
5470       OpenIntervals.erase(ToRemove);
5471 
5472     // Ignore instructions that are never used within the loop.
5473     if (Ends.find(I) == Ends.end())
5474       continue;
5475 
5476     // Skip ignored values.
5477     if (ValuesToIgnore.find(I) != ValuesToIgnore.end())
5478       continue;
5479 
5480     // For each VF find the maximum usage of registers.
5481     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
5482       // Count the number of live intervals.
5483       SmallMapVector<unsigned, unsigned, 4> RegUsage;
5484 
5485       if (VFs[j] == 1) {
5486         for (auto Inst : OpenIntervals) {
5487           unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
5488           if (RegUsage.find(ClassID) == RegUsage.end())
5489             RegUsage[ClassID] = 1;
5490           else
5491             RegUsage[ClassID] += 1;
5492         }
5493       } else {
5494         collectUniformsAndScalars(VFs[j]);
5495         for (auto Inst : OpenIntervals) {
5496           // Skip ignored values for VF > 1.
5497           if (VecValuesToIgnore.find(Inst) != VecValuesToIgnore.end())
5498             continue;
5499           if (isScalarAfterVectorization(Inst, VFs[j])) {
5500             unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
5501             if (RegUsage.find(ClassID) == RegUsage.end())
5502               RegUsage[ClassID] = 1;
5503             else
5504               RegUsage[ClassID] += 1;
5505           } else {
5506             unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
5507             if (RegUsage.find(ClassID) == RegUsage.end())
5508               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
5509             else
5510               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
5511           }
5512         }
5513       }
5514 
5515       for (auto& pair : RegUsage) {
5516         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
5517           MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
5518         else
5519           MaxUsages[j][pair.first] = pair.second;
5520       }
5521     }
5522 
5523     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
5524                       << OpenIntervals.size() << '\n');
5525 
5526     // Add the current instruction to the list of open intervals.
5527     OpenIntervals.insert(I);
5528   }
5529 
5530   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
5531     SmallMapVector<unsigned, unsigned, 4> Invariant;
5532 
5533     for (auto Inst : LoopInvariants) {
5534       unsigned Usage = VFs[i] == 1 ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
5535       unsigned ClassID = TTI.getRegisterClassForType(VFs[i] > 1, Inst->getType());
5536       if (Invariant.find(ClassID) == Invariant.end())
5537         Invariant[ClassID] = Usage;
5538       else
5539         Invariant[ClassID] += Usage;
5540     }
5541 
5542     LLVM_DEBUG({
5543       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
5544       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
5545              << " item\n";
5546       for (const auto &pair : MaxUsages[i]) {
5547         dbgs() << "LV(REG): RegisterClass: "
5548                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
5549                << " registers\n";
5550       }
5551       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
5552              << " item\n";
5553       for (const auto &pair : Invariant) {
5554         dbgs() << "LV(REG): RegisterClass: "
5555                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
5556                << " registers\n";
5557       }
5558     });
5559 
5560     RU.LoopInvariantRegs = Invariant;
5561     RU.MaxLocalUsers = MaxUsages[i];
5562     RUs[i] = RU;
5563   }
5564 
5565   return RUs;
5566 }
5567 
5568 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
5569   // TODO: Cost model for emulated masked load/store is completely
5570   // broken. This hack guides the cost model to use an artificially
5571   // high enough value to practically disable vectorization with such
5572   // operations, except where previously deployed legality hack allowed
5573   // using very low cost values. This is to avoid regressions coming simply
5574   // from moving "masked load/store" check from legality to cost model.
5575   // Masked Load/Gather emulation was previously never allowed.
5576   // Limited number of Masked Store/Scatter emulation was allowed.
5577   assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
5578   return isa<LoadInst>(I) ||
5579          (isa<StoreInst>(I) &&
5580           NumPredStores > NumberOfStoresToPredicate);
5581 }
5582 
5583 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
5584   // If we aren't vectorizing the loop, or if we've already collected the
5585   // instructions to scalarize, there's nothing to do. Collection may already
5586   // have occurred if we have a user-selected VF and are now computing the
5587   // expected cost for interleaving.
5588   if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end())
5589     return;
5590 
5591   // Initialize a mapping for VF in InstsToScalarize. If we find that it's
5592   // not profitable to scalarize any instructions, the presence of VF in the
5593   // map will indicate that we've analyzed it already.
5594   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
5595 
5596   // Find all the instructions that are scalar with predication in the loop and
5597   // determine if it would be better to not if-convert the blocks they are in.
5598   // If so, we also record the instructions to scalarize.
5599   for (BasicBlock *BB : TheLoop->blocks()) {
5600     if (!blockNeedsPredication(BB))
5601       continue;
5602     for (Instruction &I : *BB)
5603       if (isScalarWithPredication(&I)) {
5604         ScalarCostsTy ScalarCosts;
5605         // Do not apply discount logic if hacked cost is needed
5606         // for emulated masked memrefs.
5607         if (!useEmulatedMaskMemRefHack(&I) &&
5608             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
5609           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
5610         // Remember that BB will remain after vectorization.
5611         PredicatedBBsAfterVectorization.insert(BB);
5612       }
5613   }
5614 }
5615 
5616 int LoopVectorizationCostModel::computePredInstDiscount(
5617     Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
5618     unsigned VF) {
5619   assert(!isUniformAfterVectorization(PredInst, VF) &&
5620          "Instruction marked uniform-after-vectorization will be predicated");
5621 
5622   // Initialize the discount to zero, meaning that the scalar version and the
5623   // vector version cost the same.
5624   int Discount = 0;
5625 
5626   // Holds instructions to analyze. The instructions we visit are mapped in
5627   // ScalarCosts. Those instructions are the ones that would be scalarized if
5628   // we find that the scalar version costs less.
5629   SmallVector<Instruction *, 8> Worklist;
5630 
5631   // Returns true if the given instruction can be scalarized.
5632   auto canBeScalarized = [&](Instruction *I) -> bool {
5633     // We only attempt to scalarize instructions forming a single-use chain
5634     // from the original predicated block that would otherwise be vectorized.
5635     // Although not strictly necessary, we give up on instructions we know will
5636     // already be scalar to avoid traversing chains that are unlikely to be
5637     // beneficial.
5638     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
5639         isScalarAfterVectorization(I, VF))
5640       return false;
5641 
5642     // If the instruction is scalar with predication, it will be analyzed
5643     // separately. We ignore it within the context of PredInst.
5644     if (isScalarWithPredication(I))
5645       return false;
5646 
5647     // If any of the instruction's operands are uniform after vectorization,
5648     // the instruction cannot be scalarized. This prevents, for example, a
5649     // masked load from being scalarized.
5650     //
5651     // We assume we will only emit a value for lane zero of an instruction
5652     // marked uniform after vectorization, rather than VF identical values.
5653     // Thus, if we scalarize an instruction that uses a uniform, we would
5654     // create uses of values corresponding to the lanes we aren't emitting code
5655     // for. This behavior can be changed by allowing getScalarValue to clone
5656     // the lane zero values for uniforms rather than asserting.
5657     for (Use &U : I->operands())
5658       if (auto *J = dyn_cast<Instruction>(U.get()))
5659         if (isUniformAfterVectorization(J, VF))
5660           return false;
5661 
5662     // Otherwise, we can scalarize the instruction.
5663     return true;
5664   };
5665 
5666   // Compute the expected cost discount from scalarizing the entire expression
5667   // feeding the predicated instruction. We currently only consider expressions
5668   // that are single-use instruction chains.
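  // Illustrative example (hypothetical costs): if the predicated chain costs
  // 10 when vectorized but only 6 as predicated scalar code (after scaling by
  // the block probability), the discount is 10 - 6 = 4 >= 0 and the chain is
  // recorded in ScalarCosts for scalarization.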
5669   Worklist.push_back(PredInst);
5670   while (!Worklist.empty()) {
5671     Instruction *I = Worklist.pop_back_val();
5672 
5673     // If we've already analyzed the instruction, there's nothing to do.
5674     if (ScalarCosts.find(I) != ScalarCosts.end())
5675       continue;
5676 
5677     // Compute the cost of the vector instruction. Note that this cost already
5678     // includes the scalarization overhead of the predicated instruction.
5679     unsigned VectorCost = getInstructionCost(I, VF).first;
5680 
5681     // Compute the cost of the scalarized instruction. This cost is the cost of
5682     // the instruction as if it wasn't if-converted and instead remained in the
5683     // predicated block. We will scale this cost by block probability after
5684     // computing the scalarization overhead.
5685     unsigned ScalarCost = VF * getInstructionCost(I, 1).first;
5686 
5687     // Compute the scalarization overhead of needed insertelement instructions
5688     // and phi nodes.
5689     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
5690       ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF),
5691                                                  true, false);
5692       ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI);
5693     }
5694 
5695     // Compute the scalarization overhead of needed extractelement
5696     // instructions. For each of the instruction's operands, if the operand can
5697     // be scalarized, add it to the worklist; otherwise, account for the
5698     // overhead.
5699     for (Use &U : I->operands())
5700       if (auto *J = dyn_cast<Instruction>(U.get())) {
5701         assert(VectorType::isValidElementType(J->getType()) &&
5702                "Instruction has non-scalar type");
5703         if (canBeScalarized(J))
5704           Worklist.push_back(J);
5705         else if (needsExtract(J, VF))
5706           ScalarCost += TTI.getScalarizationOverhead(
5707                               ToVectorTy(J->getType(),VF), false, true);
5708       }
5709 
5710     // Scale the total scalar cost by block probability.
5711     ScalarCost /= getReciprocalPredBlockProb();
5712 
5713     // Compute the discount. A non-negative discount means the vector version
5714     // of the instruction costs more, and scalarizing would be beneficial.
5715     Discount += VectorCost - ScalarCost;
5716     ScalarCosts[I] = ScalarCost;
5717   }
5718 
5719   return Discount;
5720 }
5721 
5722 LoopVectorizationCostModel::VectorizationCostTy
5723 LoopVectorizationCostModel::expectedCost(unsigned VF) {
5724   VectorizationCostTy Cost;
5725 
5726   // For each block.
5727   for (BasicBlock *BB : TheLoop->blocks()) {
5728     VectorizationCostTy BlockCost;
5729 
5730     // For each instruction in the old loop.
5731     for (Instruction &I : BB->instructionsWithoutDebug()) {
5732       // Skip ignored values.
5733       if (ValuesToIgnore.find(&I) != ValuesToIgnore.end() ||
5734           (VF > 1 && VecValuesToIgnore.find(&I) != VecValuesToIgnore.end()))
5735         continue;
5736 
5737       VectorizationCostTy C = getInstructionCost(&I, VF);
5738 
5739       // Check if we should override the cost.
5740       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
5741         C.first = ForceTargetInstructionCost;
5742 
5743       BlockCost.first += C.first;
5744       BlockCost.second |= C.second;
5745       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
5746                         << " for VF " << VF << " For instruction: " << I
5747                         << '\n');
5748     }
5749 
5750     // If we are vectorizing a predicated block, it will have been
5751     // if-converted. This means that the block's instructions (aside from
5752     // stores and instructions that may divide by zero) will now be
5753     // unconditionally executed. For the scalar case, we may not always execute
5754     // the predicated block. Thus, scale the block's cost by the probability of
5755     // executing it.
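    // For example (illustrative, assuming a reciprocal block probability of 2):
    // a predicated block whose scalar instructions cost 8 contributes
    // 8 / 2 = 4 to the scalar loop cost.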
5756     if (VF == 1 && blockNeedsPredication(BB))
5757       BlockCost.first /= getReciprocalPredBlockProb();
5758 
5759     Cost.first += BlockCost.first;
5760     Cost.second |= BlockCost.second;
5761   }
5762 
5763   return Cost;
5764 }
5765 
5766 /// Gets Address Access SCEV after verifying that the access pattern
5767 /// is loop invariant except the induction variable dependence.
5768 ///
5769 /// This SCEV can be sent to the Target in order to estimate the address
5770 /// calculation cost.
5771 static const SCEV *getAddressAccessSCEV(
5772               Value *Ptr,
5773               LoopVectorizationLegality *Legal,
5774               PredicatedScalarEvolution &PSE,
5775               const Loop *TheLoop) {
5776 
5777   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
5778   if (!Gep)
5779     return nullptr;
5780 
5781   // We are looking for a gep with all loop invariant indices except for one
5782   // which should be an induction variable.
5783   auto SE = PSE.getSE();
5784   unsigned NumOperands = Gep->getNumOperands();
5785   for (unsigned i = 1; i < NumOperands; ++i) {
5786     Value *Opd = Gep->getOperand(i);
5787     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
5788         !Legal->isInductionVariable(Opd))
5789       return nullptr;
5790   }
5791 
5792   // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV.
5793   return PSE.getSCEV(Ptr);
5794 }
5795 
5796 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
5797   return Legal->hasStride(I->getOperand(0)) ||
5798          Legal->hasStride(I->getOperand(1));
5799 }
5800 
5801 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5802                                                                  unsigned VF) {
5803   assert(VF > 1 && "Scalarization cost of instruction implies vectorization.");
5804   Type *ValTy = getMemInstValueType(I);
5805   auto SE = PSE.getSE();
5806 
5807   unsigned AS = getLoadStoreAddressSpace(I);
5808   Value *Ptr = getLoadStorePointerOperand(I);
5809   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
5810 
5811   // Figure out whether the access is strided and get the stride value
5812   // if it's known at compile time.
5813   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
5814 
5815   // Get the cost of the scalar memory instruction and address computation.
5816   unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
5817 
5818   // Don't pass *I here, since it is scalar but will actually be part of a
5819   // vectorized loop where the user of it is a vectorized instruction.
5820   const MaybeAlign Alignment = getLoadStoreAlignment(I);
5821   Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
5822                                    Alignment, AS);
5823 
5824   // Get the overhead of the extractelement and insertelement instructions
5825   // we might create due to scalarization.
5826   Cost += getScalarizationOverhead(I, VF);
5827 
5828   // If we have a predicated store, it may not be executed for each vector
5829   // lane. Scale the cost by the probability of executing the predicated
5830   // block.
5831   if (isPredicatedInst(I)) {
5832     Cost /= getReciprocalPredBlockProb();
5833 
5834     if (useEmulatedMaskMemRefHack(I))
5835       // Artificially setting to a high enough value to practically disable
5836       // vectorization with such operations.
5837       Cost = 3000000;
5838   }
5839 
5840   return Cost;
5841 }
5842 
5843 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5844                                                              unsigned VF) {
5845   Type *ValTy = getMemInstValueType(I);
5846   Type *VectorTy = ToVectorTy(ValTy, VF);
5847   Value *Ptr = getLoadStorePointerOperand(I);
5848   unsigned AS = getLoadStoreAddressSpace(I);
5849   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
5850 
5851   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5852          "Stride should be 1 or -1 for consecutive memory access");
5853   const MaybeAlign Alignment = getLoadStoreAlignment(I);
5854   unsigned Cost = 0;
5855   if (Legal->isMaskRequired(I))
5856     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy,
5857                                       Alignment ? Alignment->value() : 0, AS);
5858   else
5859     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I);
5860 
5861   bool Reverse = ConsecutiveStride < 0;
5862   if (Reverse)
5863     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5864   return Cost;
5865 }
5866 
5867 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5868                                                          unsigned VF) {
5869   Type *ValTy = getMemInstValueType(I);
5870   Type *VectorTy = ToVectorTy(ValTy, VF);
5871   const MaybeAlign Alignment = getLoadStoreAlignment(I);
5872   unsigned AS = getLoadStoreAddressSpace(I);
5873   if (isa<LoadInst>(I)) {
5874     return TTI.getAddressComputationCost(ValTy) +
5875            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) +
5876            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
5877   }
5878   StoreInst *SI = cast<StoreInst>(I);
5879 
5880   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
5881   return TTI.getAddressComputationCost(ValTy) +
5882          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS) +
5883          (isLoopInvariantStoreValue
5884               ? 0
5885               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
5886                                        VF - 1));
5887 }
5888 
5889 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
5890                                                           unsigned VF) {
5891   Type *ValTy = getMemInstValueType(I);
5892   Type *VectorTy = ToVectorTy(ValTy, VF);
5893   const MaybeAlign Alignment = getLoadStoreAlignment(I);
5894   Value *Ptr = getLoadStorePointerOperand(I);
5895 
5896   return TTI.getAddressComputationCost(VectorTy) +
5897          TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
5898                                     Legal->isMaskRequired(I),
5899                                     Alignment ? Alignment->value() : 0);
5900 }
5901 
5902 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
5903                                                             unsigned VF) {
5904   Type *ValTy = getMemInstValueType(I);
5905   Type *VectorTy = ToVectorTy(ValTy, VF);
5906   unsigned AS = getLoadStoreAddressSpace(I);
5907 
5908   auto Group = getInterleavedAccessGroup(I);
5909   assert(Group && "Fail to get an interleaved access group.");
5910 
5911   unsigned InterleaveFactor = Group->getFactor();
5912   Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
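  // For example (illustrative): a factor-2 interleave group of i32 accesses at
  // VF = 4 is costed via TTI.getInterleavedMemoryOpCost on a wide <8 x i32>
  // vector type.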
5913 
5914   // Holds the indices of existing members in an interleaved load group.
5915   // An interleaved store group doesn't need this as it doesn't allow gaps.
5916   SmallVector<unsigned, 4> Indices;
5917   if (isa<LoadInst>(I)) {
5918     for (unsigned i = 0; i < InterleaveFactor; i++)
5919       if (Group->getMember(i))
5920         Indices.push_back(i);
5921   }
5922 
5923   // Calculate the cost of the whole interleaved group.
5924   bool UseMaskForGaps =
5925       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
5926   unsigned Cost = TTI.getInterleavedMemoryOpCost(
5927       I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
5928       Group->getAlignment(), AS, Legal->isMaskRequired(I), UseMaskForGaps);
5929 
5930   if (Group->isReverse()) {
5931     // TODO: Add support for reversed masked interleaved access.
5932     assert(!Legal->isMaskRequired(I) &&
5933            "Reverse masked interleaved access not supported.");
5934     Cost += Group->getNumMembers() *
5935             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5936   }
5937   return Cost;
5938 }
5939 
5940 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
5941                                                               unsigned VF) {
5942   // Calculate scalar cost only. Vectorization cost should be ready at this
5943   // moment.
5944   if (VF == 1) {
5945     Type *ValTy = getMemInstValueType(I);
5946     const MaybeAlign Alignment = getLoadStoreAlignment(I);
5947     unsigned AS = getLoadStoreAddressSpace(I);
5948 
5949     return TTI.getAddressComputationCost(ValTy) +
5950            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I);
5951   }
5952   return getWideningCost(I, VF);
5953 }
5954 
5955 LoopVectorizationCostModel::VectorizationCostTy
5956 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
5957   // If we know that this instruction will remain uniform, check the cost of
5958   // the scalar version.
5959   if (isUniformAfterVectorization(I, VF))
5960     VF = 1;
5961 
5962   if (VF > 1 && isProfitableToScalarize(I, VF))
5963     return VectorizationCostTy(InstsToScalarize[VF][I], false);
5964 
5965   // Forced scalars do not have any scalarization overhead.
5966   auto ForcedScalar = ForcedScalars.find(VF);
5967   if (VF > 1 && ForcedScalar != ForcedScalars.end()) {
5968     auto InstSet = ForcedScalar->second;
5969     if (InstSet.find(I) != InstSet.end())
5970       return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false);
5971   }
5972 
5973   Type *VectorTy;
5974   unsigned C = getInstructionCost(I, VF, VectorTy);
5975 
5976   bool TypeNotScalarized =
5977       VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF;
5978   return VectorizationCostTy(C, TypeNotScalarized);
5979 }
5980 
5981 unsigned LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
5982                                                               unsigned VF) {
5983 
5984   if (VF == 1)
5985     return 0;
5986 
5987   unsigned Cost = 0;
5988   Type *RetTy = ToVectorTy(I->getType(), VF);
5989   if (!RetTy->isVoidTy() &&
5990       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
5991     Cost += TTI.getScalarizationOverhead(RetTy, true, false);
5992 
5993   // Some targets keep addresses scalar.
5994   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
5995     return Cost;
5996 
5997   // Some targets support efficient element stores.
5998   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
5999     return Cost;
6000 
6001   // Collect operands to consider.
6002   CallInst *CI = dyn_cast<CallInst>(I);
6003   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
6004 
6005   // Skip operands that do not require extraction/scalarization and do not incur
6006   // any overhead.
6007   return Cost + TTI.getOperandsScalarizationOverhead(
6008                     filterExtractingOperands(Ops, VF), VF);
6009 }
6010 
6011 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
6012   if (VF == 1)
6013     return;
6014   NumPredStores = 0;
6015   for (BasicBlock *BB : TheLoop->blocks()) {
6016     // For each instruction in the old loop.
6017     for (Instruction &I : *BB) {
6018       Value *Ptr =  getLoadStorePointerOperand(&I);
6019       if (!Ptr)
6020         continue;
6021 
6022       // TODO: We should generate better code and update the cost model for
6023       // predicated uniform stores. Today they are treated as any other
6024       // predicated store (see added test cases in
6025       // invariant-store-vectorization.ll).
6026       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
6027         NumPredStores++;
6028 
6029       if (Legal->isUniform(Ptr) &&
6030           // Conditional loads and stores should be scalarized and predicated.
6031           // isScalarWithPredication cannot be used here since masked
6032           // gather/scatters are not considered scalar with predication.
6033           !Legal->blockNeedsPredication(I.getParent())) {
6034         // TODO: Avoid replicating loads and stores instead of
6035         // relying on instcombine to remove them.
6036         // Load: Scalar load + broadcast
6037         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
6038         unsigned Cost = getUniformMemOpCost(&I, VF);
6039         setWideningDecision(&I, VF, CM_Scalarize, Cost);
6040         continue;
6041       }
6042 
6043       // We assume that widening is the best solution when possible.
6044       if (memoryInstructionCanBeWidened(&I, VF)) {
6045         unsigned Cost = getConsecutiveMemOpCost(&I, VF);
6046         int ConsecutiveStride =
6047                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
6048         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6049                "Expected consecutive stride.");
6050         InstWidening Decision =
6051             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
6052         setWideningDecision(&I, VF, Decision, Cost);
6053         continue;
6054       }
6055 
6056       // Choose between Interleaving, Gather/Scatter or Scalarization.
6057       unsigned InterleaveCost = std::numeric_limits<unsigned>::max();
6058       unsigned NumAccesses = 1;
6059       if (isAccessInterleaved(&I)) {
6060         auto Group = getInterleavedAccessGroup(&I);
6061         assert(Group && "Fail to get an interleaved access group.");
6062 
6063         // Make one decision for the whole group.
6064         if (getWideningDecision(&I, VF) != CM_Unknown)
6065           continue;
6066 
6067         NumAccesses = Group->getNumMembers();
6068         if (interleavedAccessCanBeWidened(&I, VF))
6069           InterleaveCost = getInterleaveGroupCost(&I, VF);
6070       }
6071 
6072       unsigned GatherScatterCost =
6073           isLegalGatherOrScatter(&I)
6074               ? getGatherScatterCost(&I, VF) * NumAccesses
6075               : std::numeric_limits<unsigned>::max();
6076 
6077       unsigned ScalarizationCost =
6078           getMemInstScalarizationCost(&I, VF) * NumAccesses;
6079 
      // Choose the best option for the current VF, record this decision and
      // use it during vectorization.
6082       unsigned Cost;
6083       InstWidening Decision;
6084       if (InterleaveCost <= GatherScatterCost &&
6085           InterleaveCost < ScalarizationCost) {
6086         Decision = CM_Interleave;
6087         Cost = InterleaveCost;
6088       } else if (GatherScatterCost < ScalarizationCost) {
6089         Decision = CM_GatherScatter;
6090         Cost = GatherScatterCost;
6091       } else {
6092         Decision = CM_Scalarize;
6093         Cost = ScalarizationCost;
6094       }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The cost is computed for the whole group
      // but will actually be assigned to a single member.
6098       if (auto Group = getInterleavedAccessGroup(&I))
6099         setWideningDecision(Group, VF, Decision, Cost);
6100       else
6101         setWideningDecision(&I, VF, Decision, Cost);
6102     }
6103   }
6104 
  // Make sure that any load of an address and any other address computation
6106   // remains scalar unless there is gather/scatter support. This avoids
6107   // inevitable extracts into address registers, and also has the benefit of
6108   // activating LSR more, since that pass can't optimize vectorized
6109   // addresses.
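  // For example, a load whose only use is as the pointer operand of another
  // load would otherwise be widened, forcing an extractelement per lane just
  // to form each scalar address.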
6110   if (TTI.prefersVectorizedAddressing())
6111     return;
6112 
6113   // Start with all scalar pointer uses.
6114   SmallPtrSet<Instruction *, 8> AddrDefs;
6115   for (BasicBlock *BB : TheLoop->blocks())
6116     for (Instruction &I : *BB) {
6117       Instruction *PtrDef =
6118         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
6119       if (PtrDef && TheLoop->contains(PtrDef) &&
6120           getWideningDecision(&I, VF) != CM_GatherScatter)
6121         AddrDefs.insert(PtrDef);
6122     }
6123 
6124   // Add all instructions used to generate the addresses.
6125   SmallVector<Instruction *, 4> Worklist;
6126   for (auto *I : AddrDefs)
6127     Worklist.push_back(I);
6128   while (!Worklist.empty()) {
6129     Instruction *I = Worklist.pop_back_val();
6130     for (auto &Op : I->operands())
6131       if (auto *InstOp = dyn_cast<Instruction>(Op))
6132         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
6133             AddrDefs.insert(InstOp).second)
6134           Worklist.push_back(InstOp);
6135   }
6136 
6137   for (auto *I : AddrDefs) {
6138     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // cost functions, but since this requires knowing whether the loaded
      // register is involved in an address computation, the decision is
      // instead changed here once we know this is the case.
6143       InstWidening Decision = getWideningDecision(I, VF);
6144       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
6145         // Scalarize a widened load of address.
6146         setWideningDecision(I, VF, CM_Scalarize,
6147                             (VF * getMemoryInstructionCost(I, 1)));
6148       else if (auto Group = getInterleavedAccessGroup(I)) {
6149         // Scalarize an interleave group of address loads.
        for (unsigned Idx = 0; Idx < Group->getFactor(); ++Idx) {
          if (Instruction *Member = Group->getMember(Idx))
            setWideningDecision(Member, VF, CM_Scalarize,
                                (VF * getMemoryInstructionCost(Member, 1)));
6154         }
6155       }
6156     } else
6157       // Make sure I gets scalarized and a cost estimate without
6158       // scalarization overhead.
6159       ForcedScalars[VF].insert(I);
6160   }
6161 }
6162 
6163 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6164                                                         unsigned VF,
6165                                                         Type *&VectorTy) {
6166   Type *RetTy = I->getType();
6167   if (canTruncateToMinimalBitwidth(I, VF))
6168     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6169   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
6170   auto SE = PSE.getSE();
6171 
6172   // TODO: We need to estimate the cost of intrinsic calls.
6173   switch (I->getOpcode()) {
6174   case Instruction::GetElementPtr:
6175     // We mark this instruction as zero-cost because the cost of GEPs in
6176     // vectorized code depends on whether the corresponding memory instruction
6177     // is scalarized or not. Therefore, we handle GEPs with the memory
6178     // instruction cost.
6179     return 0;
6180   case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
6184     bool ScalarPredicatedBB = false;
6185     BranchInst *BI = cast<BranchInst>(I);
6186     if (VF > 1 && BI->isConditional() &&
6187         (PredicatedBBsAfterVectorization.find(BI->getSuccessor(0)) !=
6188              PredicatedBBsAfterVectorization.end() ||
6189          PredicatedBBsAfterVectorization.find(BI->getSuccessor(1)) !=
6190              PredicatedBBsAfterVectorization.end()))
6191       ScalarPredicatedBB = true;
6192 
6193     if (ScalarPredicatedBB) {
6194       // Return cost for branches around scalarized and predicated blocks.
6195       Type *Vec_i1Ty =
6196           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
6197       return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) +
6198               (TTI.getCFInstrCost(Instruction::Br) * VF));
6199     } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1)
6200       // The back-edge branch will remain, as will all scalar branches.
6201       return TTI.getCFInstrCost(Instruction::Br);
6202     else
6203       // This branch will be eliminated by if-conversion.
6204       return 0;
6205     // Note: We currently assume zero cost for an unconditional branch inside
6206     // a predicated block since it will become a fall-through, although we
6207     // may decide in the future to call TTI for all branches.
6208   }
6209   case Instruction::PHI: {
6210     auto *Phi = cast<PHINode>(I);
6211 
6212     // First-order recurrences are replaced by vector shuffles inside the loop.
6213     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
6214     if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
6215       return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
6216                                 VectorTy, VF - 1, VectorType::get(RetTy, 1));
6217 
6218     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
6219     // converted into select instructions. We require N - 1 selects per phi
6220     // node, where N is the number of incoming values.
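    // E.g., a phi with three incoming values lowers to two nested selects.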
6221     if (VF > 1 && Phi->getParent() != TheLoop->getHeader())
6222       return (Phi->getNumIncomingValues() - 1) *
6223              TTI.getCmpSelInstrCost(
6224                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
6225                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF));
6226 
6227     return TTI.getCFInstrCost(Instruction::PHI);
6228   }
6229   case Instruction::UDiv:
6230   case Instruction::SDiv:
6231   case Instruction::URem:
6232   case Instruction::SRem:
6233     // If we have a predicated instruction, it may not be executed for each
6234     // vector lane. Get the scalarization cost and scale this amount by the
6235     // probability of executing the predicated block. If the instruction is not
6236     // predicated, we fall through to the next case.
6237     if (VF > 1 && isScalarWithPredication(I)) {
6238       unsigned Cost = 0;
6239 
6240       // These instructions have a non-void type, so account for the phi nodes
6241       // that we will create. This cost is likely to be zero. The phi node
6242       // cost, if any, should be scaled by the block probability because it
6243       // models a copy at the end of each predicated block.
6244       Cost += VF * TTI.getCFInstrCost(Instruction::PHI);
6245 
6246       // The cost of the non-predicated instruction.
6247       Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);
6248 
6249       // The cost of insertelement and extractelement instructions needed for
6250       // scalarization.
6251       Cost += getScalarizationOverhead(I, VF);
6252 
6253       // Scale the cost by the probability of executing the predicated blocks.
6254       // This assumes the predicated block for each vector lane is equally
6255       // likely.
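      // E.g., with the default reciprocal probability of 2, the predicated
      // cost computed above is halved.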
6256       return Cost / getReciprocalPredBlockProb();
6257     }
6258     LLVM_FALLTHROUGH;
6259   case Instruction::Add:
6260   case Instruction::FAdd:
6261   case Instruction::Sub:
6262   case Instruction::FSub:
6263   case Instruction::Mul:
6264   case Instruction::FMul:
6265   case Instruction::FDiv:
6266   case Instruction::FRem:
6267   case Instruction::Shl:
6268   case Instruction::LShr:
6269   case Instruction::AShr:
6270   case Instruction::And:
6271   case Instruction::Or:
6272   case Instruction::Xor: {
    // Since we will replace the stride by 1, the multiplication should go away.
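    // E.g., for A[i * Stride] where a runtime check specializes Stride to 1,
    // the multiply feeding the GEP folds away in the versioned loop.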
6274     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
6275       return 0;
6276     // Certain instructions can be cheaper to vectorize if they have a constant
6277     // second vector operand. One example of this are shifts on x86.
6278     Value *Op2 = I->getOperand(1);
6279     TargetTransformInfo::OperandValueProperties Op2VP;
6280     TargetTransformInfo::OperandValueKind Op2VK =
6281         TTI.getOperandInfo(Op2, Op2VP);
6282     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
6283       Op2VK = TargetTransformInfo::OK_UniformValue;
6284 
6285     SmallVector<const Value *, 4> Operands(I->operand_values());
6286     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6287     return N * TTI.getArithmeticInstrCost(
6288                    I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue,
6289                    Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
6290   }
6291   case Instruction::FNeg: {
6292     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6293     return N * TTI.getArithmeticInstrCost(
6294                    I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue,
6295                    TargetTransformInfo::OK_AnyValue,
6296                    TargetTransformInfo::OP_None, TargetTransformInfo::OP_None,
6297                    I->getOperand(0), I);
6298   }
6299   case Instruction::Select: {
6300     SelectInst *SI = cast<SelectInst>(I);
6301     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6302     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6303     Type *CondTy = SI->getCondition()->getType();
6304     if (!ScalarCond)
6305       CondTy = VectorType::get(CondTy, VF);
6306 
6307     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I);
6308   }
6309   case Instruction::ICmp:
6310   case Instruction::FCmp: {
6311     Type *ValTy = I->getOperand(0)->getType();
6312     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6313     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6314       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
6315     VectorTy = ToVectorTy(ValTy, VF);
6316     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I);
6317   }
6318   case Instruction::Store:
6319   case Instruction::Load: {
6320     unsigned Width = VF;
6321     if (Width > 1) {
6322       InstWidening Decision = getWideningDecision(I, Width);
6323       assert(Decision != CM_Unknown &&
6324              "CM decision should be taken at this point");
6325       if (Decision == CM_Scalarize)
6326         Width = 1;
6327     }
6328     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
6329     return getMemoryInstructionCost(I, VF);
6330   }
6331   case Instruction::ZExt:
6332   case Instruction::SExt:
6333   case Instruction::FPToUI:
6334   case Instruction::FPToSI:
6335   case Instruction::FPExt:
6336   case Instruction::PtrToInt:
6337   case Instruction::IntToPtr:
6338   case Instruction::SIToFP:
6339   case Instruction::UIToFP:
6340   case Instruction::Trunc:
6341   case Instruction::FPTrunc:
6342   case Instruction::BitCast: {
6343     // We optimize the truncation of induction variables having constant
6344     // integer steps. The cost of these truncations is the same as the scalar
6345     // operation.
6346     if (isOptimizableIVTruncate(I, VF)) {
6347       auto *Trunc = cast<TruncInst>(I);
6348       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6349                                   Trunc->getSrcTy(), Trunc);
6350     }
6351 
6352     Type *SrcScalarTy = I->getOperand(0)->getType();
6353     Type *SrcVecTy =
6354         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6355     if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
6359       //
6360       // Calculate the modified src and dest types.
6361       Type *MinVecTy = VectorTy;
6362       if (I->getOpcode() == Instruction::Trunc) {
6363         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
6364         VectorTy =
6365             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6366       } else if (I->getOpcode() == Instruction::ZExt ||
6367                  I->getOpcode() == Instruction::SExt) {
6368         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
6369         VectorTy =
6370             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6371       }
6372     }
6373 
6374     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6375     return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I);
6376   }
6377   case Instruction::Call: {
6378     bool NeedToScalarize;
6379     CallInst *CI = cast<CallInst>(I);
6380     unsigned CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
6381     if (getVectorIntrinsicIDForCall(CI, TLI))
6382       return std::min(CallCost, getVectorIntrinsicCost(CI, VF));
6383     return CallCost;
6384   }
6385   default:
6386     // The cost of executing VF copies of the scalar instruction. This opcode
6387     // is unknown. Assume that it is the same as 'mul'.
6388     return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) +
6389            getScalarizationOverhead(I, VF);
6390   } // end of switch.
6391 }
6392 
6393 char LoopVectorize::ID = 0;
6394 
6395 static const char lv_name[] = "Loop Vectorization";
6396 
6397 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
6398 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
6399 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
6400 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
6401 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
6402 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
6403 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
6404 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
6405 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
6406 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
6407 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
6408 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
6409 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
6410 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
6411 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
6412 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
6413 
6414 namespace llvm {
6415 
6416 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
6417 
6418 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
6419                               bool VectorizeOnlyWhenForced) {
6420   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
6421 }
6422 
6423 } // end namespace llvm
6424 
6425 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
6426   // Check if the pointer operand of a load or store instruction is
6427   // consecutive.
6428   if (auto *Ptr = getLoadStorePointerOperand(Inst))
6429     return Legal->isConsecutivePtr(Ptr);
6430   return false;
6431 }
6432 
6433 void LoopVectorizationCostModel::collectValuesToIgnore() {
6434   // Ignore ephemeral values.
6435   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
6436 
6437   // Ignore type-promoting instructions we identified during reduction
6438   // detection.
6439   for (auto &Reduction : *Legal->getReductionVars()) {
6440     RecurrenceDescriptor &RedDes = Reduction.second;
6441     SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6442     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6443   }
6444   // Ignore type-casting instructions we identified during induction
6445   // detection.
6446   for (auto &Induction : *Legal->getInductionVars()) {
6447     InductionDescriptor &IndDes = Induction.second;
6448     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6449     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6450   }
6451 }
6452 
6453 // TODO: we could return a pair of values that specify the max VF and
6454 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
6456 // doesn't have a cost model that can choose which plan to execute if
6457 // more than one is generated.
6458 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
6459                                  LoopVectorizationCostModel &CM) {
6460   unsigned WidestType;
6461   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
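  // E.g., 256-bit wide vector registers and a widest scalar type of i32 give
  // a VF of 8.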
6462   return WidestVectorRegBits / WidestType;
6463 }
6464 
6465 VectorizationFactor
6466 LoopVectorizationPlanner::planInVPlanNativePath(unsigned UserVF) {
6467   unsigned VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is profitable.
6470   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6471   // the vectorization pipeline.
6472   if (!OrigLoop->empty()) {
6473     // If the user doesn't provide a vectorization factor, determine a
6474     // reasonable one.
6475     if (!UserVF) {
      VF = determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector */), CM);
6477       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
6478 
6479       // Make sure we have a VF > 1 for stress testing.
6480       if (VPlanBuildStressTest && VF < 2) {
6481         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
6482                           << "overriding computed VF.\n");
6483         VF = 4;
6484       }
6485     }
6486     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6487     assert(isPowerOf2_32(VF) && "VF needs to be a power of two");
6488     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVF ? "user " : "") << "VF " << VF
6489                       << " to build VPlans.\n");
6490     buildVPlans(VF, VF);
6491 
6492     // For VPlan build stress testing, we bail out after VPlan construction.
6493     if (VPlanBuildStressTest)
6494       return VectorizationFactor::Disabled();
6495 
6496     return {VF, 0};
6497   }
6498 
6499   LLVM_DEBUG(
6500       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6501                 "VPlan-native path.\n");
6502   return VectorizationFactor::Disabled();
6503 }
6504 
6505 Optional<VectorizationFactor> LoopVectorizationPlanner::plan(unsigned UserVF) {
6506   assert(OrigLoop->empty() && "Inner loop expected.");
6507   Optional<unsigned> MaybeMaxVF = CM.computeMaxVF();
  if (!MaybeMaxVF) // Cases that should not be vectorized nor interleaved.
6509     return None;
6510 
6511   // Invalidate interleave groups if all blocks of loop will be predicated.
6512   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
6513       !useMaskedInterleavedAccesses(*TTI)) {
6514     LLVM_DEBUG(
6515         dbgs()
6516         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6517            "which requires masked-interleaved support.\n");
6518     CM.InterleaveInfo.reset();
6519   }
6520 
6521   if (UserVF) {
6522     LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6523     assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
6524     // Collect the instructions (and their associated costs) that will be more
6525     // profitable to scalarize.
6526     CM.selectUserVectorizationFactor(UserVF);
6527     buildVPlansWithVPRecipes(UserVF, UserVF);
6528     LLVM_DEBUG(printPlans(dbgs()));
6529     return {{UserVF, 0}};
6530   }
6531 
6532   unsigned MaxVF = MaybeMaxVF.getValue();
6533   assert(MaxVF != 0 && "MaxVF is zero.");
6534 
6535   for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
6536     // Collect Uniform and Scalar instructions after vectorization with VF.
6537     CM.collectUniformsAndScalars(VF);
6538 
6539     // Collect the instructions (and their associated costs) that will be more
6540     // profitable to scalarize.
6541     if (VF > 1)
6542       CM.collectInstsToScalarize(VF);
6543   }
6544 
6545   buildVPlansWithVPRecipes(1, MaxVF);
6546   LLVM_DEBUG(printPlans(dbgs()));
6547   if (MaxVF == 1)
6548     return VectorizationFactor::Disabled();
6549 
6550   // Select the optimal vectorization factor.
6551   return CM.selectVectorizationFactor(MaxVF);
6552 }
6553 
6554 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
6555   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
6556                     << '\n');
6557   BestVF = VF;
6558   BestUF = UF;
6559 
6560   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
6561     return !Plan->hasVF(VF);
6562   });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
6564 }
6565 
6566 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
6567                                            DominatorTree *DT) {
6568   // Perform the actual loop transformation.
6569 
6570   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
6571   VPCallbackILV CallbackILV(ILV);
6572 
6573   VPTransformState State{BestVF, BestUF,      LI,
6574                          DT,     ILV.Builder, ILV.VectorLoopValueMap,
6575                          &ILV,   CallbackILV};
6576   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
6577   State.TripCount = ILV.getOrCreateTripCount(nullptr);
6578 
6579   //===------------------------------------------------===//
6580   //
  // Notice: any optimization or new instruction that goes
6582   // into the code below should also be implemented in
6583   // the cost-model.
6584   //
6585   //===------------------------------------------------===//
6586 
6587   // 2. Copy and widen instructions from the old loop into the new loop.
6588   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
6589   VPlans.front()->execute(&State);
6590 
6591   // 3. Fix the vectorized code: take care of header phi's, live-outs,
6592   //    predication, updating analyses.
6593   ILV.fixVectorizedLoop();
6594 }
6595 
6596 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
6597     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
6598   BasicBlock *Latch = OrigLoop->getLoopLatch();
6599 
6600   // We create new control-flow for the vectorized loop, so the original
6601   // condition will be dead after vectorization if it's only used by the
6602   // branch.
6603   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
6604   if (Cmp && Cmp->hasOneUse())
6605     DeadInstructions.insert(Cmp);
6606 
6607   // We create new "steps" for induction variable updates to which the original
6608   // induction variables map. An original update instruction will be dead if
6609   // all its users except the induction variable are dead.
6610   for (auto &Induction : *Legal->getInductionVars()) {
6611     PHINode *Ind = Induction.first;
6612     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
6613     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
6614           return U == Ind || DeadInstructions.find(cast<Instruction>(U)) !=
6615                                  DeadInstructions.end();
6616         }))
6617       DeadInstructions.insert(IndUpdate);
6618 
    // We also record as "Dead" the type-casting instructions we had identified
6620     // during induction analysis. We don't need any handling for them in the
6621     // vectorized loop because we have proven that, under a proper runtime
6622     // test guarding the vectorized loop, the value of the phi, and the casted
6623     // value of the phi, are the same. The last instruction in this casting chain
6624     // will get its scalar/vector/widened def from the scalar/vector/widened def
6625     // of the respective phi node. Any other casts in the induction def-use chain
6626     // have no other uses outside the phi update chain, and will be ignored.
6627     InductionDescriptor &IndDes = Induction.second;
6628     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6629     DeadInstructions.insert(Casts.begin(), Casts.end());
6630   }
6631 }
6632 
6633 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
6634 
6635 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
6636 
6637 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
6638                                         Instruction::BinaryOps BinOp) {
6639   // When unrolling and the VF is 1, we only need to add a simple scalar.
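  // E.g., for an integer induction with StartIdx == 2 and Step == 1 this
  // simply returns Val + 2.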
6640   Type *Ty = Val->getType();
6641   assert(!Ty->isVectorTy() && "Val must be a scalar");
6642 
6643   if (Ty->isFloatingPointTy()) {
6644     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
6645 
6646     // Floating point operations had to be 'fast' to enable the unrolling.
6647     Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
6648     return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
6649   }
6650   Constant *C = ConstantInt::get(Ty, StartIdx);
6651   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
6652 }
6653 
6654 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
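  // The resulting loop metadata looks roughly like:
  //   !0 = !{!0, ..., !1}
  //   !1 = !{!"llvm.loop.unroll.runtime.disable"}
  // where !0 is the self-referential loop ID re-attached to the loop.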
6655   SmallVector<Metadata *, 4> MDs;
6656   // Reserve first location for self reference to the LoopID metadata node.
6657   MDs.push_back(nullptr);
6658   bool IsUnrollMetadata = false;
6659   MDNode *LoopID = L->getLoopID();
6660   if (LoopID) {
6661     // First find existing loop unrolling disable metadata.
6662     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
6663       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
6664       if (MD) {
6665         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
6666         IsUnrollMetadata =
6667             S && S->getString().startswith("llvm.loop.unroll.disable");
6668       }
6669       MDs.push_back(LoopID->getOperand(i));
6670     }
6671   }
6672 
6673   if (!IsUnrollMetadata) {
6674     // Add runtime unroll disable metadata.
6675     LLVMContext &Context = L->getHeader()->getContext();
6676     SmallVector<Metadata *, 1> DisableOperands;
6677     DisableOperands.push_back(
6678         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
6679     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
6680     MDs.push_back(DisableNode);
6681     MDNode *NewLoopID = MDNode::get(Context, MDs);
6682     // Set operand 0 to refer to the loop id itself.
6683     NewLoopID->replaceOperandWith(0, NewLoopID);
6684     L->setLoopID(NewLoopID);
6685   }
6686 }
6687 
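// For example, testing a predicate that holds only for VF < 4 over the range
// {1, 9} (i.e. VFs 1, 2, 4 and 8) clamps Range.End to 4 and returns true, the
// value of the predicate at Range.Start.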
6688 bool LoopVectorizationPlanner::getDecisionAndClampRange(
6689     const std::function<bool(unsigned)> &Predicate, VFRange &Range) {
6690   assert(Range.End > Range.Start && "Trying to test an empty VF range.");
6691   bool PredicateAtRangeStart = Predicate(Range.Start);
6692 
6693   for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2)
6694     if (Predicate(TmpVF) != PredicateAtRangeStart) {
6695       Range.End = TmpVF;
6696       break;
6697     }
6698 
6699   return PredicateAtRangeStart;
6700 }
6701 
6702 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
6703 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
6704 /// of VF's starting at a given VF and extending it as much as possible. Each
6705 /// vectorization decision can potentially shorten this sub-range during
6706 /// buildVPlan().
6707 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) {
6708   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
6709     VFRange SubRange = {VF, MaxVF + 1};
6710     VPlans.push_back(buildVPlan(SubRange));
6711     VF = SubRange.End;
6712   }
6713 }
6714 
6715 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
6716                                          VPlanPtr &Plan) {
6717   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
6718 
6719   // Look for cached value.
6720   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
6721   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
6722   if (ECEntryIt != EdgeMaskCache.end())
6723     return ECEntryIt->second;
6724 
6725   VPValue *SrcMask = createBlockInMask(Src, Plan);
6726 
6727   // The terminator has to be a branch inst!
6728   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
6729   assert(BI && "Unexpected terminator found");
6730 
6731   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
6732     return EdgeMaskCache[Edge] = SrcMask;
6733 
6734   VPValue *EdgeMask = Plan->getVPValue(BI->getCondition());
6735   assert(EdgeMask && "No Edge Mask found for condition");
6736 
6737   if (BI->getSuccessor(0) != Dst)
6738     EdgeMask = Builder.createNot(EdgeMask);
6739 
6740   if (SrcMask) // Otherwise block in-mask is all-one, no need to AND.
6741     EdgeMask = Builder.createAnd(EdgeMask, SrcMask);
6742 
6743   return EdgeMaskCache[Edge] = EdgeMask;
6744 }
6745 
6746 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
6747   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
6748 
6749   // Look for cached value.
6750   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
6751   if (BCEntryIt != BlockMaskCache.end())
6752     return BCEntryIt->second;
6753 
6754   // All-one mask is modelled as no-mask following the convention for masked
6755   // load/store/gather/scatter. Initialize BlockMask to no-mask.
6756   VPValue *BlockMask = nullptr;
6757 
6758   if (OrigLoop->getHeader() == BB) {
6759     if (!CM.blockNeedsPredication(BB))
6760       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
6761 
6762     // Introduce the early-exit compare IV <= BTC to form header block mask.
6763     // This is used instead of IV < TC because TC may wrap, unlike BTC.
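    // E.g., folding the tail of a 7-iteration loop with VF 4 gives BTC = 6;
    // the second vector iteration compares lanes {4,5,6,7} <= 6 and yields
    // the mask <1,1,1,0>.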
6764     VPValue *IV = Plan->getVPValue(Legal->getPrimaryInduction());
6765     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
6766     BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
6767     return BlockMaskCache[BB] = BlockMask;
6768   }
6769 
6770   // This is the block mask. We OR all incoming edges.
6771   for (auto *Predecessor : predecessors(BB)) {
6772     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
6773     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
6774       return BlockMaskCache[BB] = EdgeMask;
6775 
6776     if (!BlockMask) { // BlockMask has its initialized nullptr value.
6777       BlockMask = EdgeMask;
6778       continue;
6779     }
6780 
6781     BlockMask = Builder.createOr(BlockMask, EdgeMask);
6782   }
6783 
6784   return BlockMaskCache[BB] = BlockMask;
6785 }
6786 
6787 VPWidenMemoryInstructionRecipe *
6788 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range,
6789                                   VPlanPtr &Plan) {
6790   if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
6791     return nullptr;
6792 
6793   auto willWiden = [&](unsigned VF) -> bool {
6794     if (VF == 1)
6795       return false;
6796     LoopVectorizationCostModel::InstWidening Decision =
6797         CM.getWideningDecision(I, VF);
6798     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
6799            "CM decision should be taken at this point.");
6800     if (Decision == LoopVectorizationCostModel::CM_Interleave)
6801       return true;
6802     if (CM.isScalarAfterVectorization(I, VF) ||
6803         CM.isProfitableToScalarize(I, VF))
6804       return false;
6805     return Decision != LoopVectorizationCostModel::CM_Scalarize;
6806   };
6807 
6808   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
6809     return nullptr;
6810 
6811   VPValue *Mask = nullptr;
6812   if (Legal->isMaskRequired(I))
6813     Mask = createBlockInMask(I->getParent(), Plan);
6814 
6815   VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I));
6816   return new VPWidenMemoryInstructionRecipe(*I, Addr, Mask);
6817 }
6818 
6819 VPWidenIntOrFpInductionRecipe *
6820 VPRecipeBuilder::tryToOptimizeInduction(Instruction *I, VFRange &Range) {
6821   if (PHINode *Phi = dyn_cast<PHINode>(I)) {
6822     // Check if this is an integer or fp induction. If so, build the recipe that
6823     // produces its scalar and vector values.
6824     InductionDescriptor II = Legal->getInductionVars()->lookup(Phi);
6825     if (II.getKind() == InductionDescriptor::IK_IntInduction ||
6826         II.getKind() == InductionDescriptor::IK_FpInduction)
6827       return new VPWidenIntOrFpInductionRecipe(Phi);
6828 
6829     return nullptr;
6830   }
6831 
6832   // Optimize the special case where the source is a constant integer
6833   // induction variable. Notice that we can only optimize the 'trunc' case
6834   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
6835   // (c) other casts depend on pointer size.
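  // E.g., "%t = trunc i64 %iv to i32" of an induction with a constant step
  // can typically be generated directly as a narrower i32 induction.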
6836 
6837   // Determine whether \p K is a truncation based on an induction variable that
6838   // can be optimized.
6839   auto isOptimizableIVTruncate =
6840       [&](Instruction *K) -> std::function<bool(unsigned)> {
6841     return
6842         [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); };
6843   };
6844 
6845   if (isa<TruncInst>(I) && LoopVectorizationPlanner::getDecisionAndClampRange(
6846                                isOptimizableIVTruncate(I), Range))
6847     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
6848                                              cast<TruncInst>(I));
6849   return nullptr;
6850 }
6851 
6852 VPBlendRecipe *VPRecipeBuilder::tryToBlend(Instruction *I, VPlanPtr &Plan) {
6853   PHINode *Phi = dyn_cast<PHINode>(I);
6854   if (!Phi || Phi->getParent() == OrigLoop->getHeader())
6855     return nullptr;
6856 
6857   // We know that all PHIs in non-header blocks are converted into selects, so
6858   // we don't have to worry about the insertion order and we can just use the
6859   // builder. At this point we generate the predication tree. There may be
6860   // duplications since this is a simple recursive scan, but future
6861   // optimizations will clean it up.
6862 
6863   SmallVector<VPValue *, 2> Masks;
6864   unsigned NumIncoming = Phi->getNumIncomingValues();
6865   for (unsigned In = 0; In < NumIncoming; In++) {
6866     VPValue *EdgeMask =
6867       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
6868     assert((EdgeMask || NumIncoming == 1) &&
6869            "Multiple predecessors with one having a full mask");
6870     if (EdgeMask)
6871       Masks.push_back(EdgeMask);
6872   }
6873   return new VPBlendRecipe(Phi, Masks);
6874 }
6875 
6876 bool VPRecipeBuilder::tryToWiden(Instruction *I, VPBasicBlock *VPBB,
6877                                  VFRange &Range) {
6878 
6879   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
6880       [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range);
6881 
6882   if (IsPredicated)
6883     return false;
6884 
6885   auto IsVectorizableOpcode = [](unsigned Opcode) {
6886     switch (Opcode) {
6887     case Instruction::Add:
6888     case Instruction::And:
6889     case Instruction::AShr:
6890     case Instruction::BitCast:
6891     case Instruction::Br:
6892     case Instruction::Call:
6893     case Instruction::FAdd:
6894     case Instruction::FCmp:
6895     case Instruction::FDiv:
6896     case Instruction::FMul:
6897     case Instruction::FNeg:
6898     case Instruction::FPExt:
6899     case Instruction::FPToSI:
6900     case Instruction::FPToUI:
6901     case Instruction::FPTrunc:
6902     case Instruction::FRem:
6903     case Instruction::FSub:
6904     case Instruction::ICmp:
6905     case Instruction::IntToPtr:
6906     case Instruction::Load:
6907     case Instruction::LShr:
6908     case Instruction::Mul:
6909     case Instruction::Or:
6910     case Instruction::PHI:
6911     case Instruction::PtrToInt:
6912     case Instruction::SDiv:
6913     case Instruction::Select:
6914     case Instruction::SExt:
6915     case Instruction::Shl:
6916     case Instruction::SIToFP:
6917     case Instruction::SRem:
6918     case Instruction::Store:
6919     case Instruction::Sub:
6920     case Instruction::Trunc:
6921     case Instruction::UDiv:
6922     case Instruction::UIToFP:
6923     case Instruction::URem:
6924     case Instruction::Xor:
6925     case Instruction::ZExt:
6926       return true;
6927     }
6928     return false;
6929   };
6930 
6931   if (!IsVectorizableOpcode(I->getOpcode()))
6932     return false;
6933 
6934   if (CallInst *CI = dyn_cast<CallInst>(I)) {
6935     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6936     if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
6937                ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect))
6938       return false;
6939   }
6940 
6941   auto willWiden = [&](unsigned VF) -> bool {
6942     if (!isa<PHINode>(I) && (CM.isScalarAfterVectorization(I, VF) ||
6943                              CM.isProfitableToScalarize(I, VF)))
6944       return false;
6945     if (CallInst *CI = dyn_cast<CallInst>(I)) {
6946       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6947       // The following case may be scalarized depending on the VF.
      // The flag shows whether we use an intrinsic or a regular call for the
      // vectorized version of the instruction: is it beneficial to perform
      // the intrinsic call compared to the library call?
6951       bool NeedToScalarize;
6952       unsigned CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
6953       bool UseVectorIntrinsic =
6954           ID && CM.getVectorIntrinsicCost(CI, VF) <= CallCost;
6955       return UseVectorIntrinsic || !NeedToScalarize;
6956     }
6957     if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
6958       assert(CM.getWideningDecision(I, VF) ==
6959                  LoopVectorizationCostModel::CM_Scalarize &&
6960              "Memory widening decisions should have been taken care by now");
6961       return false;
6962     }
6963     return true;
6964   };
6965 
6966   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
6967     return false;
6968   // If this ingredient's recipe is to be recorded, keep its recipe a singleton
6969   // to avoid having to split recipes later.
6970   bool IsSingleton = Ingredient2Recipe.count(I);
6971 
6972   // Success: widen this instruction.
6973 
6974   // Use the default widening recipe. We optimize the common case where
6975   // consecutive instructions can be represented by a single recipe.
6976   if (!IsSingleton && !VPBB->empty() && LastExtensibleRecipe == &VPBB->back() &&
6977       LastExtensibleRecipe->appendInstruction(I))
6978     return true;
6979 
6980   VPWidenRecipe *WidenRecipe = new VPWidenRecipe(I);
6981   if (!IsSingleton)
6982     LastExtensibleRecipe = WidenRecipe;
6983   setRecipe(I, WidenRecipe);
6984   VPBB->appendRecipe(WidenRecipe);
6985   return true;
6986 }
6987 
6988 VPBasicBlock *VPRecipeBuilder::handleReplication(
6989     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
6990     DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
6991     VPlanPtr &Plan) {
6992   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
6993       [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); },
6994       Range);
6995 
6996   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
6997       [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range);
6998 
6999   auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated);
7000   setRecipe(I, Recipe);
7001 
7002   // Find if I uses a predicated instruction. If so, it will use its scalar
7003   // value. Avoid hoisting the insert-element which packs the scalar value into
7004   // a vector value, as that happens iff all users use the vector value.
7005   for (auto &Op : I->operands())
7006     if (auto *PredInst = dyn_cast<Instruction>(Op))
7007       if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
7008         PredInst2Recipe[PredInst]->setAlsoPack(false);
7009 
7010   // Finalize the recipe for Instr, first if it is not predicated.
7011   if (!IsPredicated) {
7012     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
7013     VPBB->appendRecipe(Recipe);
7014     return VPBB;
7015   }
7016   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
7017   assert(VPBB->getSuccessors().empty() &&
7018          "VPBB has successors when handling predicated replication.");
7019   // Record predicated instructions for above packing optimizations.
7020   PredInst2Recipe[I] = Recipe;
7021   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
7022   VPBlockUtils::insertBlockAfter(Region, VPBB);
7023   auto *RegSucc = new VPBasicBlock();
7024   VPBlockUtils::insertBlockAfter(RegSucc, Region);
7025   return RegSucc;
7026 }
7027 
7028 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
7029                                                       VPRecipeBase *PredRecipe,
7030                                                       VPlanPtr &Plan) {
7031   // Instructions marked for predication are replicated and placed under an
7032   // if-then construct to prevent side-effects.
7033 
7034   // Generate recipes to compute the block mask for this region.
7035   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
7036 
7037   // Build the triangular if-then region.
7038   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
7039   assert(Instr->getParent() && "Predicated instruction not in any basic block");
7040   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
7041   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
7042   auto *PHIRecipe =
7043       Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
7044   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
7045   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
7046   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
7047 
7048   // Note: first set Entry as region entry and then connect successors starting
7049   // from it in order, to propagate the "parent" of each VPBasicBlock.
7050   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
7051   VPBlockUtils::connectBlocks(Pred, Exit);
7052 
7053   return Region;
7054 }
7055 
7056 bool VPRecipeBuilder::tryToCreateRecipe(Instruction *Instr, VFRange &Range,
7057                                         VPlanPtr &Plan, VPBasicBlock *VPBB) {
7058   VPRecipeBase *Recipe = nullptr;
7059 
7060   // First, check for specific widening recipes that deal with memory
7061   // operations, inductions and Phi nodes.
7062   if ((Recipe = tryToWidenMemory(Instr, Range, Plan)) ||
7063       (Recipe = tryToOptimizeInduction(Instr, Range)) ||
7064       (Recipe = tryToBlend(Instr, Plan)) ||
7065       (isa<PHINode>(Instr) &&
7066        (Recipe = new VPWidenPHIRecipe(cast<PHINode>(Instr))))) {
7067     setRecipe(Instr, Recipe);
7068     VPBB->appendRecipe(Recipe);
7069     return true;
7070   }
7071 
7072   // Handle GEP widening.
7073   if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Instr)) {
7074     auto Scalarize = [&](unsigned VF) {
7075       return CM.isScalarWithPredication(Instr, VF) ||
7076              CM.isScalarAfterVectorization(Instr, VF) ||
7077              CM.isProfitableToScalarize(Instr, VF);
7078     };
7079     if (LoopVectorizationPlanner::getDecisionAndClampRange(Scalarize, Range))
7080       return false;
7081     VPWidenGEPRecipe *Recipe = new VPWidenGEPRecipe(GEP, OrigLoop);
7082     setRecipe(Instr, Recipe);
7083     VPBB->appendRecipe(Recipe);
7084     return true;
7085   }
7086 
7087   // Check if Instr is to be widened by a general VPWidenRecipe, after
7088   // having first checked for specific widening recipes.
7089   if (tryToWiden(Instr, VPBB, Range))
7090     return true;
7091 
7092   return false;
7093 }
7094 
7095 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF,
7096                                                         unsigned MaxVF) {
7097   assert(OrigLoop->empty() && "Inner loop expected.");
7098 
7099   // Collect conditions feeding internal conditional branches; they need to be
7100   // represented in VPlan for it to model masking.
7101   SmallPtrSet<Value *, 1> NeedDef;
7102 
7103   auto *Latch = OrigLoop->getLoopLatch();
7104   for (BasicBlock *BB : OrigLoop->blocks()) {
7105     if (BB == Latch)
7106       continue;
7107     BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
7108     if (Branch && Branch->isConditional())
7109       NeedDef.insert(Branch->getCondition());
7110   }
7111 
7112   // If the tail is to be folded by masking, the primary induction variable
7113   // needs to be represented in VPlan for it to model early-exit masking.
7114   // Also, both the Phi and the live-out instruction of each reduction are
7115   // required in order to introduce a select between them in VPlan.
7116   if (CM.foldTailByMasking()) {
7117     NeedDef.insert(Legal->getPrimaryInduction());
7118     for (auto &Reduction : *Legal->getReductionVars()) {
7119       NeedDef.insert(Reduction.first);
7120       NeedDef.insert(Reduction.second.getLoopExitInstr());
7121     }
7122   }
7123 
7124   // Collect instructions from the original loop that will become trivially dead
7125   // in the vectorized loop. We don't need to vectorize these instructions. For
7126   // example, original induction update instructions can become dead because we
7127   // separately emit induction "steps" when generating code for the new loop.
7128   // Similarly, we create a new latch condition when setting up the structure
7129   // of the new loop, so the old one can become dead.
7130   SmallPtrSet<Instruction *, 4> DeadInstructions;
7131   collectTriviallyDeadInstructions(DeadInstructions);
7132 
7133   // Add assume instructions we need to drop to DeadInstructions, to prevent
7134   // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
7136   // control flow is preserved, we should keep them.
7137   auto &ConditionalAssumes = Legal->getConditionalAssumes();
7138   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
7139 
7140   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
7141   // Dead instructions do not need sinking. Remove them from SinkAfter.
7142   for (Instruction *I : DeadInstructions)
7143     SinkAfter.erase(I);
7144 
7145   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
7146     VFRange SubRange = {VF, MaxVF + 1};
7147     VPlans.push_back(buildVPlanWithVPRecipes(SubRange, NeedDef,
7148                                              DeadInstructions, SinkAfter));
7149     VF = SubRange.End;
7150   }
7151 }
7152 
7153 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
7154     VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef,
7155     SmallPtrSetImpl<Instruction *> &DeadInstructions,
7156     const DenseMap<Instruction *, Instruction *> &SinkAfter) {
7157 
7158   // Hold a mapping from predicated instructions to their recipes, in order to
7159   // fix their AlsoPack behavior if a user is determined to replicate and use a
  // scalar instead of a vector value.
7161   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
7162 
7163   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
7164 
7165   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, Builder);
7166 
7167   // ---------------------------------------------------------------------------
7168   // Pre-construction: record ingredients whose recipes we'll need to further
7169   // process after constructing the initial VPlan.
7170   // ---------------------------------------------------------------------------
7171 
7172   // Mark instructions we'll need to sink later and their targets as
7173   // ingredients whose recipe we'll need to record.
7174   for (auto &Entry : SinkAfter) {
7175     RecipeBuilder.recordRecipeOf(Entry.first);
7176     RecipeBuilder.recordRecipeOf(Entry.second);
7177   }
7178 
7179   // For each interleave group which is relevant for this (possibly trimmed)
7180   // Range, add it to the set of groups to be later applied to the VPlan and add
7181   // placeholders for its members' Recipes which we'll be replacing with a
7182   // single VPInterleaveRecipe.
7183   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
7184     auto applyIG = [IG, this](unsigned VF) -> bool {
7185       return (VF >= 2 && // Query is illegal for VF == 1
7186               CM.getWideningDecision(IG->getInsertPos(), VF) ==
7187                   LoopVectorizationCostModel::CM_Interleave);
7188     };
7189     if (!getDecisionAndClampRange(applyIG, Range))
7190       continue;
7191     InterleaveGroups.insert(IG);
7192     for (unsigned i = 0; i < IG->getFactor(); i++)
7193       if (Instruction *Member = IG->getMember(i))
7194         RecipeBuilder.recordRecipeOf(Member);
  }
7196 
7197   // ---------------------------------------------------------------------------
7198   // Build initial VPlan: Scan the body of the loop in a topological order to
7199   // visit each basic block after having visited its predecessor basic blocks.
7200   // ---------------------------------------------------------------------------
7201 
7202   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
7203   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
7204   auto Plan = std::make_unique<VPlan>(VPBB);
7205 
7206   // Represent values that will have defs inside VPlan.
7207   for (Value *V : NeedDef)
7208     Plan->addVPValue(V);
7209 
7210   // Scan the body of the loop in a topological order to visit each basic block
7211   // after having visited its predecessor basic blocks.
7212   LoopBlocksDFS DFS(OrigLoop);
7213   DFS.perform(LI);
7214 
7215   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
7216     // Relevant instructions from basic block BB will be grouped into VPRecipe
7217     // ingredients and fill a new VPBasicBlock.
7218     unsigned VPBBsForBB = 0;
7219     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
7220     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
7221     VPBB = FirstVPBBForBB;
7222     Builder.setInsertPoint(VPBB);
7223 
7224     // Introduce each ingredient into VPlan.
7225     for (Instruction &I : BB->instructionsWithoutDebug()) {
7226       Instruction *Instr = &I;
7227 
7228       // First filter out irrelevant instructions, to ensure no recipes are
7229       // built for them.
7230       if (isa<BranchInst>(Instr) ||
7231           DeadInstructions.find(Instr) != DeadInstructions.end())
7232         continue;
7233 
7234       if (RecipeBuilder.tryToCreateRecipe(Instr, Range, Plan, VPBB))
7235         continue;
7236 
7237       // Otherwise, if all widening options failed, Instruction is to be
7238       // replicated. This may create a successor for VPBB.
7239       VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
7240           Instr, Range, VPBB, PredInst2Recipe, Plan);
7241       if (NextVPBB != VPBB) {
7242         VPBB = NextVPBB;
7243         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
7244                                     : "");
7245       }
7246     }
7247   }
7248 
7249   // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
  // may also be empty, such as the last one, VPBB, reflecting original
  // basic blocks with no recipes.
7252   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
7253   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
7254   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
7255   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
7256   delete PreEntry;
7257 
7258   // ---------------------------------------------------------------------------
7259   // Transform initial VPlan: Apply previously taken decisions, in order, to
7260   // bring the VPlan to its final state.
7261   // ---------------------------------------------------------------------------
7262 
7263   // Apply Sink-After legal constraints.
7264   for (auto &Entry : SinkAfter) {
7265     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
7266     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
7267     Sink->moveAfter(Target);
7268   }
7269 
7270   // Interleave memory: for each Interleave Group we marked earlier as relevant
7271   // for this VPlan, replace the Recipes widening its memory instructions with a
7272   // single VPInterleaveRecipe at its insertion point.
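  // E.g., a group of factor 2 loading A[2*i] and A[2*i+1] ends up with one
  // VPInterleaveRecipe at the group's insert position, and the two original
  // widening recipes are erased.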
7273   for (auto IG : InterleaveGroups) {
7274     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
7275         RecipeBuilder.getRecipe(IG->getInsertPos()));
7276     (new VPInterleaveRecipe(IG, Recipe->getAddr(), Recipe->getMask()))
7277         ->insertBefore(Recipe);
7278 
7279     for (unsigned i = 0; i < IG->getFactor(); ++i)
7280       if (Instruction *Member = IG->getMember(i)) {
7281         RecipeBuilder.getRecipe(Member)->eraseFromParent();
7282       }
7283   }
7284 
7285   // Finally, if tail is folded by masking, introduce selects between the phi
7286   // and the live-out instruction of each reduction, at the end of the latch.
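  // E.g., for a sum reduction this emits select(header-mask, red.next,
  // red.phi) so that masked-off (remainder) lanes keep the unchanged phi
  // value.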
7287   if (CM.foldTailByMasking()) {
7288     Builder.setInsertPoint(VPBB);
7289     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
7290     for (auto &Reduction : *Legal->getReductionVars()) {
7291       VPValue *Phi = Plan->getVPValue(Reduction.first);
7292       VPValue *Red = Plan->getVPValue(Reduction.second.getLoopExitInstr());
7293       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
7294     }
7295   }
7296 
7297   std::string PlanName;
7298   raw_string_ostream RSO(PlanName);
7299   unsigned VF = Range.Start;
7300   Plan->addVF(VF);
7301   RSO << "Initial VPlan for VF={" << VF;
7302   for (VF *= 2; VF < Range.End; VF *= 2) {
7303     Plan->addVF(VF);
7304     RSO << "," << VF;
7305   }
7306   RSO << "},UF>=1";
7307   RSO.flush();
7308   Plan->setName(PlanName);
7309 
7310   return Plan;
7311 }
7312 
7313 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is profitable.
7316   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7317   // the vectorization pipeline.
7318   assert(!OrigLoop->empty());
7319   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7320 
7321   // Create new empty VPlan
7322   auto Plan = std::make_unique<VPlan>();
7323 
7324   // Build hierarchical CFG
7325   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
7326   HCFGBuilder.buildHierarchicalCFG();
7327 
7328   for (unsigned VF = Range.Start; VF < Range.End; VF *= 2)
7329     Plan->addVF(VF);
7330 
7331   if (EnableVPlanPredication) {
7332     VPlanPredicator VPP(*Plan);
7333     VPP.predicate();
7334 
7335     // Avoid running transformation to recipes until masked code generation in
7336     // VPlan-native path is in place.
7337     return Plan;
7338   }
7339 
7340   SmallPtrSet<Instruction *, 1> DeadInstructions;
7341   VPlanTransforms::VPInstructionsToVPRecipes(
7342       OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
7343   return Plan;
7344 }
7345 
Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateVectorValues(
    Value *V, unsigned Part) {
  return ILV.getOrCreateVectorValue(V, Part);
}
7350 
7351 Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue(
7352     Value *V, const VPIteration &Instance) {
7353   return ILV.getOrCreateScalarValue(V, Instance);
7354 }
7355 
7356 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const {
7357   O << " +\n"
7358     << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
7359   IG->getInsertPos()->printAsOperand(O, false);
7360   O << ", ";
7361   getAddr()->printAsOperand(O);
7362   VPValue *Mask = getMask();
7363   if (Mask) {
7364     O << ", ";
7365     Mask->printAsOperand(O);
7366   }
7367   O << "\\l\"";
7368   for (unsigned i = 0; i < IG->getFactor(); ++i)
7369     if (Instruction *I = IG->getMember(i))
7370       O << " +\n"
7371         << Indent << "\"  " << VPlanIngredient(I) << " " << i << "\\l\"";
7372 }
7373 
7374 void VPWidenRecipe::execute(VPTransformState &State) {
7375   for (auto &Instr : make_range(Begin, End))
7376     State.ILV->widenInstruction(Instr);
7377 }
7378 
7379 void VPWidenGEPRecipe::execute(VPTransformState &State) {
7380   State.ILV->widenGEP(GEP, State.UF, State.VF, IsPtrLoopInvariant,
7381                       IsIndexLoopInvariant);
7382 }
7383 
7384 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
7385   assert(!State.Instance && "Int or FP induction being replicated.");
7386   State.ILV->widenIntOrFpInduction(IV, Trunc);
7387 }
7388 
7389 void VPWidenPHIRecipe::execute(VPTransformState &State) {
7390   State.ILV->widenPHIInstruction(Phi, State.UF, State.VF);
7391 }
7392 
7393 void VPBlendRecipe::execute(VPTransformState &State) {
7394   State.ILV->setDebugLocFromInst(State.Builder, Phi);
7395   // We know that all PHIs in non-header blocks are converted into
7396   // selects, so we don't have to worry about the insertion order and we
7397   // can just use the builder.
7398   // At this point we generate the predication tree. There may be
7399   // duplications since this is a simple recursive scan, but future
7400   // optimizations will clean it up.
7401 
7402   unsigned NumIncoming = Phi->getNumIncomingValues();
7403 
  assert((User || NumIncoming == 1) &&
         "Multiple predecessors with one having a full mask");
7406   // Generate a sequence of selects of the form:
7407   // SELECT(Mask3, In3,
7408   //      SELECT(Mask2, In2,
7409   //                   ( ...)))
7410   InnerLoopVectorizer::VectorParts Entry(State.UF);
7411   for (unsigned In = 0; In < NumIncoming; ++In) {
7412     for (unsigned Part = 0; Part < State.UF; ++Part) {
7413       // We might have single edge PHIs (blocks) - use an identity
7414       // 'select' for the first PHI operand.
7415       Value *In0 =
7416           State.ILV->getOrCreateVectorValue(Phi->getIncomingValue(In), Part);
7417       if (In == 0)
7418         Entry[Part] = In0; // Initialize with the first incoming value.
7419       else {
7420         // Select between the current value and the previous incoming edge
7421         // based on the incoming mask.
7422         Value *Cond = State.get(User->getOperand(In), Part);
7423         Entry[Part] =
7424             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
7425       }
7426     }
7427   }
7428   for (unsigned Part = 0; Part < State.UF; ++Part)
7429     State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
7430 }
7431 
7432 void VPInterleaveRecipe::execute(VPTransformState &State) {
7433   assert(!State.Instance && "Interleave group being replicated.");
7434   State.ILV->vectorizeInterleaveGroup(IG->getInsertPos(), State, getAddr(),
7435                                       getMask());
7436 }
7437 
7438 void VPReplicateRecipe::execute(VPTransformState &State) {
7439   if (State.Instance) { // Generate a single instance.
7440     State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
7441     // Insert scalar instance packing it into a vector.
7442     if (AlsoPack && State.VF > 1) {
7443       // If we're constructing lane 0, initialize to start from undef.
7444       if (State.Instance->Lane == 0) {
7445         Value *Undef =
7446             UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
7447         State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
7448       }
7449       State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
7450     }
7451     return;
7452   }
7453 
  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane of each
  // of the UF parts.
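  // For example, with UF = 2 and VF = 4 a non-uniform ingredient yields
  // 2 x 4 = 8 scalar copies, while a uniform one yields only 2 (lane 0 of each
  // part).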
7457   unsigned EndLane = IsUniform ? 1 : State.VF;
7458   for (unsigned Part = 0; Part < State.UF; ++Part)
7459     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
7460       State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
7461 }
7462 
7463 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
7464   assert(State.Instance && "Branch on Mask works only on single instance.");
7465 
7466   unsigned Part = State.Instance->Part;
7467   unsigned Lane = State.Instance->Lane;
7468 
7469   Value *ConditionBit = nullptr;
7470   if (!User) // Block in mask is all-one.
7471     ConditionBit = State.Builder.getTrue();
7472   else {
7473     VPValue *BlockInMask = User->getOperand(0);
7474     ConditionBit = State.get(BlockInMask, Part);
7475     if (ConditionBit->getType()->isVectorTy())
7476       ConditionBit = State.Builder.CreateExtractElement(
7477           ConditionBit, State.Builder.getInt32(Lane));
7478   }
7479 
7480   // Replace the temporary unreachable terminator with a new conditional branch,
7481   // whose two destinations will be set later when they are created.
7482   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
7483   assert(isa<UnreachableInst>(CurrentTerminator) &&
7484          "Expected to replace unreachable terminator with conditional branch.");
7485   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
7486   CondBr->setSuccessor(0, nullptr);
7487   ReplaceInstWithInst(CurrentTerminator, CondBr);
7488 }
7489 
7490 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
7491   assert(State.Instance && "Predicated instruction PHI works per instance.");
7492   Instruction *ScalarPredInst = cast<Instruction>(
7493       State.ValueMap.getScalarValue(PredInst, *State.Instance));
7494   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
7495   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
7496   assert(PredicatingBB && "Predicated block has no single predecessor.");
7497 
7498   // By current pack/unpack logic we need to generate only a single phi node: if
7499   // a vector value for the predicated instruction exists at this point it means
7500   // the instruction has vector users only, and a phi for the vector value is
7501   // needed. In this case the recipe of the predicated instruction is marked to
7502   // also do that packing, thereby "hoisting" the insert-element sequence.
7503   // Otherwise, a phi node for the scalar value is needed.
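  // Illustrative shapes of the two cases (value names are made up):
  //   vector: %vphi = phi <VF x T> [ %wide.before,      %PredicatingBB ],
  //                                [ %wide.with.insert, %PredicatedBB ]
  //   scalar: %sphi = phi T [ undef,        %PredicatingBB ],
  //                         [ %scalar.inst, %PredicatedBB ]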
7504   unsigned Part = State.Instance->Part;
7505   if (State.ValueMap.hasVectorValue(PredInst, Part)) {
7506     Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
7507     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
7508     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
7509     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
7510     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
7511     State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
7512   } else {
7513     Type *PredInstType = PredInst->getType();
7514     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
7515     Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
7516     Phi->addIncoming(ScalarPredInst, PredicatedBB);
7517     State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
7518   }
7519 }
7520 
7521 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
7522   State.ILV->vectorizeMemoryInstruction(&Instr, State, getAddr(), getMask());
7523 }
7524 
7525 // Determine how to lower the scalar epilogue, which depends on 1) optimising
7526 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
7527 // predication, and 4) a TTI hook that analyses whether the loop is suitable
7528 // for predication.
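// For example: a function compiled for size with no 'force' hint hits case 1
// and gets CM_ScalarEpilogueNotAllowedOptSize; otherwise, provided predication
// is not disabled on the command line and a primary induction exists, an
// enabled predicate hint (case 3) or an agreeing TTI hook (case 4) yields
// CM_ScalarEpilogueNotNeededUsePredicate instead of a scalar epilogue.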
7529 static ScalarEpilogueLowering getScalarEpilogueLowering(
7530     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
7531     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
7532     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
7533     LoopVectorizationLegality &LVL) {
7534   bool OptSize =
7535       F->hasOptSize() || llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
7536                                                      PGSOQueryType::IRPass);
7537   // 1) OptSize takes precedence over all other options, i.e. if this is set,
7538   // don't look at hints or options, and don't request a scalar epilogue.
7539   if (OptSize && Hints.getForce() != LoopVectorizeHints::FK_Enabled)
7540     return CM_ScalarEpilogueNotAllowedOptSize;
7541 
7542   bool PredicateOptDisabled = PreferPredicateOverEpilog.getNumOccurrences() &&
7543                               !PreferPredicateOverEpilog;
7544 
7545   // 2) Next, if disabling predication is requested on the command line, honour
7546   // this and request a scalar epilogue. Also do this if we don't have a
7547   // primary induction variable, which is required for predication.
7548   if (PredicateOptDisabled || !LVL.getPrimaryInduction())
7549     return CM_ScalarEpilogueAllowed;
7550 
  // 3) and 4) check whether predication is requested on the command line or
  // with a loop hint, or whether the TTI hook indicates it is profitable; if
  // so, request predication.
7554   if (PreferPredicateOverEpilog ||
7555       Hints.getPredicate() == LoopVectorizeHints::FK_Enabled ||
7556       (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
7557                                         LVL.getLAI()) &&
7558        Hints.getPredicate() != LoopVectorizeHints::FK_Disabled))
7559     return CM_ScalarEpilogueNotNeededUsePredicate;
7560 
7561   return CM_ScalarEpilogueAllowed;
7562 }
7563 
7564 // Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
7566 // VPlan-to-VPlan transformations from the very beginning without modifying the
7567 // input LLVM IR.
7568 static bool processLoopInVPlanNativePath(
7569     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
7570     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
7571     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
7572     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
7573     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {
7574 
7575   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
7576   Function *F = L->getHeader()->getParent();
7577   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
7578 
7579   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
7580       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
7581 
7582   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
7583                                 &Hints, IAI);
7584   // Use the planner for outer loop vectorization.
7585   // TODO: CM is not used at this point inside the planner. Turn CM into an
7586   // optional argument if we don't need it in the future.
7587   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI);
7588 
7589   // Get user vectorization factor.
7590   const unsigned UserVF = Hints.getWidth();
7591 
7592   // Plan how to best vectorize, return the best VF and its cost.
7593   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
7594 
7595   // If we are stress testing VPlan builds, do not attempt to generate vector
7596   // code. Masked vector code generation support will follow soon.
7597   // Also, do not attempt to vectorize if no vector code will be produced.
7598   if (VPlanBuildStressTest || EnableVPlanPredication ||
7599       VectorizationFactor::Disabled() == VF)
7600     return false;
7601 
7602   LVP.setBestPlan(VF.Width, 1);
7603 
7604   InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
7605                          &CM);
7606   LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
7607                     << L->getHeader()->getParent()->getName() << "\"\n");
7608   LVP.executePlan(LB, DT);
7609 
7610   // Mark the loop as already vectorized to avoid vectorizing again.
7611   Hints.setAlreadyVectorized();
7612 
7613   LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
7614   return true;
7615 }
7616 
7617 bool LoopVectorizePass::processLoop(Loop *L) {
7618   assert((EnableVPlanNativePath || L->empty()) &&
7619          "VPlan-native path is not enabled. Only process inner loops.");
7620 
7621 #ifndef NDEBUG
7622   const std::string DebugLocStr = getDebugLocString(L);
7623 #endif /* NDEBUG */
7624 
7625   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
7626                     << L->getHeader()->getParent()->getName() << "\" from "
7627                     << DebugLocStr << "\n");
7628 
7629   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
7630 
7631   LLVM_DEBUG(
7632       dbgs() << "LV: Loop hints:"
7633              << " force="
7634              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
7635                      ? "disabled"
7636                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
7637                             ? "enabled"
7638                             : "?"))
7639              << " width=" << Hints.getWidth()
7640              << " unroll=" << Hints.getInterleave() << "\n");
7641 
7642   // Function containing loop
7643   Function *F = L->getHeader()->getParent();
7644 
7645   // Looking at the diagnostic output is the only way to determine if a loop
7646   // was vectorized (other than looking at the IR or machine code), so it
7647   // is important to generate an optimization remark for each loop. Most of
7648   // these messages are generated as OptimizationRemarkAnalysis. Remarks
7649   // generated as OptimizationRemark and OptimizationRemarkMissed are
  // less verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.
7652 
7653   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
7654     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
7655     return false;
7656   }
7657 
7658   PredicatedScalarEvolution PSE(*SE, *L);
7659 
7660   // Check if it is legal to vectorize the loop.
7661   LoopVectorizationRequirements Requirements(*ORE);
7662   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
7663                                 &Requirements, &Hints, DB, AC);
7664   if (!LVL.canVectorize(EnableVPlanNativePath)) {
7665     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
7666     Hints.emitRemarkWithHints();
7667     return false;
7668   }
7669 
7670   // Check the function attributes and profiles to find out if this function
7671   // should be optimized for size.
7672   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
7673       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
7674 
7675   // Entrance to the VPlan-native vectorization path. Outer loops are processed
7676   // here. They may require CFG and instruction level transformations before
7677   // even evaluating whether vectorization is profitable. Since we cannot modify
7678   // the incoming IR, we need to build VPlan upfront in the vectorization
7679   // pipeline.
7680   if (!L->empty())
7681     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
7682                                         ORE, BFI, PSI, Hints);
7683 
7684   assert(L->empty() && "Inner loop expected.");
7685 
7686   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
7687   // count by optimizing for size, to minimize overheads.
7688   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
7689   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
7690     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
7691                       << "This loop is worth vectorizing only if no scalar "
7692                       << "iteration overheads are incurred.");
7693     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
7694       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
7695     else {
7696       LLVM_DEBUG(dbgs() << "\n");
7697       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
7698     }
7699   }
7700 
7701   // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem right -- what if the loop is
7703   // an integer loop and the vector instructions selected are purely integer
7704   // vector instructions?
7705   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
7706     reportVectorizationFailure(
7707         "Can't vectorize when the NoImplicitFloat attribute is used",
7708         "loop not vectorized due to NoImplicitFloat attribute",
7709         "NoImplicitFloat", ORE, L);
7710     Hints.emitRemarkWithHints();
7711     return false;
7712   }
7713 
7714   // Check if the target supports potentially unsafe FP vectorization.
7715   // FIXME: Add a check for the type of safety issue (denormal, signaling)
7716   // for the target we're vectorizing for, to make sure none of the
7717   // additional fp-math flags can help.
7718   if (Hints.isPotentiallyUnsafe() &&
7719       TTI->isFPVectorizationPotentiallyUnsafe()) {
7720     reportVectorizationFailure(
7721         "Potentially unsafe FP op prevents vectorization",
7722         "loop not vectorized due to unsafe FP support.",
7723         "UnsafeFP", ORE, L);
7724     Hints.emitRemarkWithHints();
7725     return false;
7726   }
7727 
7728   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
7729   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
7730 
7731   // If an override option has been passed in for interleaved accesses, use it.
7732   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
7733     UseInterleaved = EnableInterleavedMemAccesses;
7734 
7735   // Analyze interleaved memory accesses.
7736   if (UseInterleaved) {
7737     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
7738   }
7739 
7740   // Use the cost model.
7741   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
7742                                 F, &Hints, IAI);
7743   CM.collectValuesToIgnore();
7744 
7745   // Use the planner for vectorization.
7746   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI);
7747 
7748   // Get user vectorization factor.
7749   unsigned UserVF = Hints.getWidth();
7750 
7751   // Plan how to best vectorize, return the best VF and its cost.
7752   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF);
7753 
7754   VectorizationFactor VF = VectorizationFactor::Disabled();
7755   unsigned IC = 1;
7756   unsigned UserIC = Hints.getInterleave();
7757 
7758   if (MaybeVF) {
7759     VF = *MaybeVF;
7760     // Select the interleave count.
7761     IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
7762   }
7763 
7764   // Identify the diagnostic messages that should be produced.
7765   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
7766   bool VectorizeLoop = true, InterleaveLoop = true;
7767   if (Requirements.doesNotMeet(F, L, Hints)) {
7768     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
7769                          "requirements.\n");
7770     Hints.emitRemarkWithHints();
7771     return false;
7772   }
7773 
7774   if (VF.Width == 1) {
7775     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
7776     VecDiagMsg = std::make_pair(
7777         "VectorizationNotBeneficial",
7778         "the cost-model indicates that vectorization is not beneficial");
7779     VectorizeLoop = false;
7780   }
7781 
7782   if (!MaybeVF && UserIC > 1) {
7783     // Tell the user interleaving was avoided up-front, despite being explicitly
7784     // requested.
7785     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
7786                          "interleaving should be avoided up front\n");
7787     IntDiagMsg = std::make_pair(
7788         "InterleavingAvoided",
7789         "Ignoring UserIC, because interleaving was avoided up front");
7790     InterleaveLoop = false;
7791   } else if (IC == 1 && UserIC <= 1) {
7792     // Tell the user interleaving is not beneficial.
7793     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
7794     IntDiagMsg = std::make_pair(
7795         "InterleavingNotBeneficial",
7796         "the cost-model indicates that interleaving is not beneficial");
7797     InterleaveLoop = false;
7798     if (UserIC == 1) {
7799       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
7800       IntDiagMsg.second +=
7801           " and is explicitly disabled or interleave count is set to 1";
7802     }
7803   } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
7805     LLVM_DEBUG(
7806         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
7807     IntDiagMsg = std::make_pair(
7808         "InterleavingBeneficialButDisabled",
7809         "the cost-model indicates that interleaving is beneficial "
7810         "but is explicitly disabled or interleave count is set to 1");
7811     InterleaveLoop = false;
7812   }
7813 
7814   // Override IC if user provided an interleave count.
7815   IC = UserIC > 0 ? UserIC : IC;
7816 
7817   // Emit diagnostic messages, if any.
7818   const char *VAPassName = Hints.vectorizeAnalysisPassName();
7819   if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
7821     ORE->emit([&]() {
7822       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
7823                                       L->getStartLoc(), L->getHeader())
7824              << VecDiagMsg.second;
7825     });
7826     ORE->emit([&]() {
7827       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
7828                                       L->getStartLoc(), L->getHeader())
7829              << IntDiagMsg.second;
7830     });
7831     return false;
7832   } else if (!VectorizeLoop && InterleaveLoop) {
7833     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
7834     ORE->emit([&]() {
7835       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
7836                                         L->getStartLoc(), L->getHeader())
7837              << VecDiagMsg.second;
7838     });
7839   } else if (VectorizeLoop && !InterleaveLoop) {
7840     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
7841                       << ") in " << DebugLocStr << '\n');
7842     ORE->emit([&]() {
7843       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
7844                                         L->getStartLoc(), L->getHeader())
7845              << IntDiagMsg.second;
7846     });
7847   } else if (VectorizeLoop && InterleaveLoop) {
7848     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
7849                       << ") in " << DebugLocStr << '\n');
7850     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
7851   }
7852 
7853   LVP.setBestPlan(VF.Width, IC);
7854 
7855   using namespace ore;
7856   bool DisableRuntimeUnroll = false;
7857   MDNode *OrigLoopID = L->getLoopID();
7858 
7859   if (!VectorizeLoop) {
7860     assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not profitable to vectorize the loop, then
    // interleave it.
7863     InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
7864                                &CM);
7865     LVP.executePlan(Unroller, DT);
7866 
7867     ORE->emit([&]() {
7868       return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
7869                                 L->getHeader())
7870              << "interleaved loop (interleaved count: "
7871              << NV("InterleaveCount", IC) << ")";
7872     });
7873   } else {
    // If we decided that it is *worthwhile* to vectorize the loop, then do it.
7875     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
7876                            &LVL, &CM);
7877     LVP.executePlan(LB, DT);
7878     ++LoopsVectorized;
7879 
    // Add metadata to disable runtime unrolling of the scalar loop when there
    // are no runtime checks about strides and memory. A scalar loop that is
    // rarely run is not worth unrolling.
7883     if (!LB.areSafetyChecksAdded())
7884       DisableRuntimeUnroll = true;
7885 
7886     // Report the vectorization decision.
7887     ORE->emit([&]() {
7888       return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
7889                                 L->getHeader())
7890              << "vectorized loop (vectorization width: "
7891              << NV("VectorizationFactor", VF.Width)
7892              << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
7893     });
7894   }
7895 
7896   Optional<MDNode *> RemainderLoopID =
7897       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7898                                       LLVMLoopVectorizeFollowupEpilogue});
7899   if (RemainderLoopID.hasValue()) {
7900     L->setLoopID(RemainderLoopID.getValue());
7901   } else {
7902     if (DisableRuntimeUnroll)
7903       AddRuntimeUnrollDisableMetaData(L);
7904 
7905     // Mark the loop as already vectorized to avoid vectorizing again.
7906     Hints.setAlreadyVectorized();
7907   }
7908 
7909   LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
7910   return true;
7911 }
7912 
7913 bool LoopVectorizePass::runImpl(
7914     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
7915     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
7916     DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
7917     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
7918     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
7919   SE = &SE_;
7920   LI = &LI_;
7921   TTI = &TTI_;
7922   DT = &DT_;
7923   BFI = &BFI_;
7924   TLI = TLI_;
7925   AA = &AA_;
7926   AC = &AC_;
7927   GetLAA = &GetLAA_;
7928   DB = &DB_;
7929   ORE = &ORE_;
7930   PSI = PSI_;
7931 
7932   // Don't attempt if
7933   // 1. the target claims to have no vector registers, and
7934   // 2. interleaving won't help ILP.
7935   //
7936   // The second condition is necessary because, even if the target has no
7937   // vector registers, loop vectorization may still enable scalar
7938   // interleaving.
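  // For example, a target reporting zero vector registers but a maximum
  // interleave factor of 2 or more is still processed, since interleaving the
  // scalar loop may expose ILP.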
7939   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
7940       TTI->getMaxInterleaveFactor(1) < 2)
7941     return false;
7942 
7943   bool Changed = false;
7944 
7945   // The vectorizer requires loops to be in simplified form.
7946   // Since simplification may add new inner loops, it has to run before the
7947   // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
7949   // vectorized.
7950   for (auto &L : *LI)
7951     Changed |=
7952         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
7953 
7954   // Build up a worklist of inner-loops to vectorize. This is necessary as
7955   // the act of vectorizing or partially unrolling a loop creates new loops
7956   // and can invalidate iterators across the loops.
7957   SmallVector<Loop *, 8> Worklist;
7958 
7959   for (Loop *L : *LI)
7960     collectSupportedLoops(*L, LI, ORE, Worklist);
7961 
7962   LoopsAnalyzed += Worklist.size();
7963 
7964   // Now walk the identified inner loops.
7965   while (!Worklist.empty()) {
7966     Loop *L = Worklist.pop_back_val();
7967 
7968     // For the inner loops we actually process, form LCSSA to simplify the
7969     // transform.
7970     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
7971 
7972     Changed |= processLoop(L);
7973   }
7974 
7975   // Process each loop nest in the function.
7976   return Changed;
7977 }
7978 
7979 PreservedAnalyses LoopVectorizePass::run(Function &F,
7980                                          FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  const ModuleAnalysisManager &MAM =
      AM.getResult<ModuleAnalysisManagerFunctionProxy>(F).getManager();
  ProfileSummaryInfo *PSI =
      MAM.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  bool Changed =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  return PA;
8022 }
8023