1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
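//
// For illustration only (not tied to any particular target), a scalar loop
// such as
//   for (i = 0; i < n; ++i) A[i] = B[i] + 42;
// is conceptually rewritten at VF = 4 into a loop whose induction variable
// steps by 4 and whose body operates on <4 x i32> values, with any remaining
// iterations handled by a scalar epilogue loop (or by predicating the tail).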
17 //
// This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
// There is an ongoing effort to migrate the loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
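//
// For example (assuming an opt binary with this pass available), the native
// path can be exercised on a test file with:
//   opt -loop-vectorize -enable-vplan-native-path -S input.ll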
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SetVector.h"
73 #include "llvm/ADT/SmallPtrSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/MemorySSA.h"
91 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
92 #include "llvm/Analysis/ProfileSummaryInfo.h"
93 #include "llvm/Analysis/ScalarEvolution.h"
94 #include "llvm/Analysis/ScalarEvolutionExpander.h"
95 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
96 #include "llvm/Analysis/TargetLibraryInfo.h"
97 #include "llvm/Analysis/TargetTransformInfo.h"
98 #include "llvm/Analysis/VectorUtils.h"
99 #include "llvm/IR/Attributes.h"
100 #include "llvm/IR/BasicBlock.h"
101 #include "llvm/IR/CFG.h"
102 #include "llvm/IR/Constant.h"
103 #include "llvm/IR/Constants.h"
104 #include "llvm/IR/DataLayout.h"
105 #include "llvm/IR/DebugInfoMetadata.h"
106 #include "llvm/IR/DebugLoc.h"
107 #include "llvm/IR/DerivedTypes.h"
108 #include "llvm/IR/DiagnosticInfo.h"
109 #include "llvm/IR/Dominators.h"
110 #include "llvm/IR/Function.h"
111 #include "llvm/IR/IRBuilder.h"
112 #include "llvm/IR/InstrTypes.h"
113 #include "llvm/IR/Instruction.h"
114 #include "llvm/IR/Instructions.h"
115 #include "llvm/IR/IntrinsicInst.h"
116 #include "llvm/IR/Intrinsics.h"
117 #include "llvm/IR/LLVMContext.h"
118 #include "llvm/IR/Metadata.h"
119 #include "llvm/IR/Module.h"
120 #include "llvm/IR/Operator.h"
121 #include "llvm/IR/Type.h"
122 #include "llvm/IR/Use.h"
123 #include "llvm/IR/User.h"
124 #include "llvm/IR/Value.h"
125 #include "llvm/IR/ValueHandle.h"
126 #include "llvm/IR/Verifier.h"
127 #include "llvm/InitializePasses.h"
128 #include "llvm/Pass.h"
129 #include "llvm/Support/Casting.h"
130 #include "llvm/Support/CommandLine.h"
131 #include "llvm/Support/Compiler.h"
132 #include "llvm/Support/Debug.h"
133 #include "llvm/Support/ErrorHandling.h"
134 #include "llvm/Support/MathExtras.h"
135 #include "llvm/Support/raw_ostream.h"
136 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
137 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
138 #include "llvm/Transforms/Utils/LoopSimplify.h"
139 #include "llvm/Transforms/Utils/LoopUtils.h"
140 #include "llvm/Transforms/Utils/LoopVersioning.h"
141 #include "llvm/Transforms/Utils/SizeOpts.h"
142 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
143 #include <algorithm>
144 #include <cassert>
145 #include <cstdint>
146 #include <cstdlib>
147 #include <functional>
148 #include <iterator>
149 #include <limits>
150 #include <memory>
151 #include <string>
152 #include <tuple>
153 #include <utility>
154 
155 using namespace llvm;
156 
157 #define LV_NAME "loop-vectorize"
158 #define DEBUG_TYPE LV_NAME
159 
160 /// @{
161 /// Metadata attribute names
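/// These name the metadata nodes that carry follow-up loop attributes, i.e.
/// the attributes that should be attached to the vectorized loop and/or the
/// remainder (epilogue) loop once this transformation has run.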
162 static const char *const LLVMLoopVectorizeFollowupAll =
163     "llvm.loop.vectorize.followup_all";
164 static const char *const LLVMLoopVectorizeFollowupVectorized =
165     "llvm.loop.vectorize.followup_vectorized";
166 static const char *const LLVMLoopVectorizeFollowupEpilogue =
167     "llvm.loop.vectorize.followup_epilogue";
168 /// @}
169 
170 STATISTIC(LoopsVectorized, "Number of loops vectorized");
171 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
172 
173 /// Loops with a known constant trip count below this number are vectorized only
174 /// if no scalar iteration overheads are incurred.
175 static cl::opt<unsigned> TinyTripCountVectorThreshold(
176     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
177     cl::desc("Loops with a constant trip count that is smaller than this "
178              "value are vectorized only if no scalar iteration overheads "
179              "are incurred."));
180 
// Indicates that an epilogue is undesired and that predication is preferred.
// This means that the vectorizer will try to fold the loop-tail (epilogue)
// into the loop and predicate the loop body accordingly.
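// For example (illustrative numbers only), folding the tail of a loop with a
// trip count of 100 at VF 8 yields a single vector loop of 13 masked
// iterations, instead of 12 vector iterations plus a 4-iteration scalar
// epilogue.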
184 static cl::opt<bool> PreferPredicateOverEpilog(
185     "prefer-predicate-over-epilog", cl::init(false), cl::Hidden,
186     cl::desc("Indicate that an epilogue is undesired, predication should be "
187              "used instead."));
188 
189 static cl::opt<bool> MaximizeBandwidth(
190     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in the loop."));
193 
194 static cl::opt<bool> EnableInterleavedMemAccesses(
195     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
196     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
197 
198 /// An interleave-group may need masking if it resides in a block that needs
199 /// predication, or in order to mask away gaps.
200 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
201     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
202     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
203 
204 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
205     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
207              "below this number"));
208 
209 static cl::opt<unsigned> ForceTargetNumScalarRegs(
210     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
211     cl::desc("A flag that overrides the target's number of scalar registers."));
212 
213 static cl::opt<unsigned> ForceTargetNumVectorRegs(
214     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
215     cl::desc("A flag that overrides the target's number of vector registers."));
216 
217 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
218     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
219     cl::desc("A flag that overrides the target's max interleave factor for "
220              "scalar loops."));
221 
222 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
223     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
224     cl::desc("A flag that overrides the target's max interleave factor for "
225              "vectorized loops."));
226 
227 static cl::opt<unsigned> ForceTargetInstructionCost(
228     "force-target-instruction-cost", cl::init(0), cl::Hidden,
229     cl::desc("A flag that overrides the target's expected cost for "
230              "an instruction to a single constant value. Mostly "
231              "useful for getting consistent testing."));
232 
233 static cl::opt<unsigned> SmallLoopCost(
234     "small-loop-cost", cl::init(20), cl::Hidden,
235     cl::desc(
236         "The cost of a loop that is considered 'small' by the interleaver."));
237 
238 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
239     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
240     cl::desc("Enable the use of the block frequency analysis to access PGO "
241              "heuristics minimizing code growth in cold regions and being more "
242              "aggressive in hot regions."));
243 
244 // Runtime interleave loops for load/store throughput.
245 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
246     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
247     cl::desc(
248         "Enable runtime interleaving until load/store ports are saturated"));
249 
250 /// The number of stores in a loop that are allowed to need predication.
251 static cl::opt<unsigned> NumberOfStoresToPredicate(
252     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
253     cl::desc("Max number of stores to be predicated behind an if."));
254 
255 static cl::opt<bool> EnableIndVarRegisterHeur(
256     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
257     cl::desc("Count the induction variable only once when interleaving"));
258 
259 static cl::opt<bool> EnableCondStoresVectorization(
260     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
261     cl::desc("Enable if predication of stores during vectorization."));
262 
263 static cl::opt<unsigned> MaxNestedScalarReductionIC(
264     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
265     cl::desc("The maximum interleave count to use when interleaving a scalar "
266              "reduction in a nested loop."));
267 
268 cl::opt<bool> EnableVPlanNativePath(
269     "enable-vplan-native-path", cl::init(false), cl::Hidden,
270     cl::desc("Enable VPlan-native vectorization path with "
271              "support for outer loop vectorization."));
272 
273 // FIXME: Remove this switch once we have divergence analysis. Currently we
274 // assume divergent non-backedge branches when this switch is true.
275 cl::opt<bool> EnableVPlanPredication(
276     "enable-vplan-predication", cl::init(false), cl::Hidden,
277     cl::desc("Enable VPlan-native vectorization path predicator with "
278              "support for outer loop vectorization."));
279 
280 // This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
282 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
283 // verification of the H-CFGs built.
284 static cl::opt<bool> VPlanBuildStressTest(
285     "vplan-build-stress-test", cl::init(false), cl::Hidden,
286     cl::desc(
287         "Build VPlan for every supported loop nest in the function and bail "
288         "out right after the build (stress test the VPlan H-CFG construction "
289         "in the VPlan-native vectorization path)."));
290 
291 cl::opt<bool> llvm::EnableLoopInterleaving(
292     "interleave-loops", cl::init(true), cl::Hidden,
293     cl::desc("Enable loop interleaving in Loop vectorization passes"));
294 cl::opt<bool> llvm::EnableLoopVectorization(
295     "vectorize-loops", cl::init(true), cl::Hidden,
296     cl::desc("Run the Loop vectorization passes"));
297 
/// A helper function that returns the type of a loaded or stored value.
299 static Type *getMemInstValueType(Value *I) {
300   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
301          "Expected Load or Store instruction");
302   if (auto *LI = dyn_cast<LoadInst>(I))
303     return LI->getType();
304   return cast<StoreInst>(I)->getValueOperand()->getType();
305 }
306 
307 /// A helper function that returns true if the given type is irregular. The
308 /// type is irregular if its allocated size doesn't equal the store size of an
309 /// element of the corresponding vector type at the given vectorization factor.
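/// For example, under a typical data layout an i1 occupies a whole byte when
/// allocated as an array element, while a <4 x i1> vector stores its elements
/// as packed bits, so i1 would be reported as irregular here (the exact result
/// depends on the data layout; this is only an illustration).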
310 static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
311   // Determine if an array of VF elements of type Ty is "bitcast compatible"
312   // with a <VF x Ty> vector.
313   if (VF > 1) {
314     auto *VectorTy = VectorType::get(Ty, VF);
315     return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
316   }
317 
318   // If the vectorization factor is one, we just check if an array of type Ty
319   // requires padding between elements.
320   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
321 }
322 
323 /// A helper function that returns the reciprocal of the block probability of
324 /// predicated blocks. If we return X, we are assuming the predicated block
325 /// will execute once for every X iterations of the loop header.
326 ///
327 /// TODO: We should use actual block probability here, if available. Currently,
328 ///       we always assume predicated blocks have a 50% chance of executing.
329 static unsigned getReciprocalPredBlockProb() { return 2; }
330 
331 /// A helper function that adds a 'fast' flag to floating-point operations.
332 static Value *addFastMathFlag(Value *V) {
333   if (isa<FPMathOperator>(V))
334     cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
335   return V;
336 }
337 
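/// A helper function that sets the fast-math flags \p FMF on \p V if it is a
/// floating-point operation, and then returns \p V.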
338 static Value *addFastMathFlag(Value *V, FastMathFlags FMF) {
339   if (isa<FPMathOperator>(V))
340     cast<Instruction>(V)->setFastMathFlags(FMF);
341   return V;
342 }
343 
344 /// A helper function that returns an integer or floating-point constant with
345 /// value C.
346 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
347   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
348                            : ConstantFP::get(Ty, C);
349 }
350 
351 /// Returns "best known" trip count for the specified loop \p L as defined by
352 /// the following procedure:
353 ///   1) Returns exact trip count if it is known.
354 ///   2) Returns expected trip count according to profile data if any.
355 ///   3) Returns upper bound estimate if it is known.
356 ///   4) Returns None if all of the above failed.
357 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
358   // Check if exact trip count is known.
359   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
360     return ExpectedTC;
361 
362   // Check if there is an expected trip count available from profile data.
363   if (LoopVectorizeWithBlockFrequency)
364     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
365       return EstimatedTC;
366 
367   // Check if upper bound estimate is known.
368   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
369     return ExpectedTC;
370 
371   return None;
372 }
373 
374 namespace llvm {
375 
376 /// InnerLoopVectorizer vectorizes loops which contain only one basic
377 /// block to a specified vectorization factor (VF).
378 /// This class performs the widening of scalars into vectors, or multiple
379 /// scalars. This class also implements the following features:
380 /// * It inserts an epilogue loop for handling loops that don't have iteration
381 ///   counts that are known to be a multiple of the vectorization factor.
382 /// * It handles the code generation for reduction variables.
383 /// * Scalarization (implementation using scalars) of un-vectorizable
384 ///   instructions.
385 /// InnerLoopVectorizer does not perform any vectorization-legality
386 /// checks, and relies on the caller to check for the different legality
387 /// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found in the loop.
390 class InnerLoopVectorizer {
391 public:
392   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
393                       LoopInfo *LI, DominatorTree *DT,
394                       const TargetLibraryInfo *TLI,
395                       const TargetTransformInfo *TTI, AssumptionCache *AC,
396                       OptimizationRemarkEmitter *ORE, unsigned VecWidth,
397                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
398                       LoopVectorizationCostModel *CM)
399       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
400         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
401         Builder(PSE.getSE()->getContext()),
402         VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}
403   virtual ~InnerLoopVectorizer() = default;
404 
405   /// Create a new empty loop. Unlink the old loop and connect the new one.
406   /// Return the pre-header block of the new loop.
407   BasicBlock *createVectorizedLoopSkeleton();
408 
409   /// Widen a single instruction within the innermost loop.
410   void widenInstruction(Instruction &I);
411 
412   /// Widen a single call instruction within the innermost loop.
413   void widenCallInstruction(CallInst &I, VPUser &ArgOperands,
414                             VPTransformState &State);
415 
416   /// Widen a single select instruction within the innermost loop.
417   void widenSelectInstruction(SelectInst &I, bool InvariantCond);
418 
  /// Fix the vectorized code, taking care of header PHIs, live-outs, and more.
420   void fixVectorizedLoop();
421 
422   // Return true if any runtime check is added.
423   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
424 
425   /// A type for vectorized values in the new loop. Each value from the
426   /// original loop, when vectorized, is represented by UF vector values in the
427   /// new unrolled loop, where UF is the unroll factor.
428   using VectorParts = SmallVector<Value *, 2>;
429 
430   /// Vectorize a single GetElementPtrInst based on information gathered and
431   /// decisions taken during planning.
432   void widenGEP(GetElementPtrInst *GEP, unsigned UF, unsigned VF,
433                 bool IsPtrLoopInvariant, SmallBitVector &IsIndexLoopInvariant);
434 
435   /// Vectorize a single PHINode in a block. This method handles the induction
436   /// variable canonicalization. It supports both VF = 1 for unrolled loops and
437   /// arbitrary length vectors.
438   void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);
439 
440   /// A helper function to scalarize a single Instruction in the innermost loop.
441   /// Generates a sequence of scalar instances for each lane between \p MinLane
442   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive.
444   void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
445                             bool IfPredicateInstr);
446 
447   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
448   /// is provided, the integer induction variable will first be truncated to
449   /// the corresponding type.
450   void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);
451 
452   /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
453   /// vector or scalar value on-demand if one is not yet available. When
454   /// vectorizing a loop, we visit the definition of an instruction before its
455   /// uses. When visiting the definition, we either vectorize or scalarize the
456   /// instruction, creating an entry for it in the corresponding map. (In some
457   /// cases, such as induction variables, we will create both vector and scalar
458   /// entries.) Then, as we encounter uses of the definition, we derive values
459   /// for each scalar or vector use unless such a value is already available.
460   /// For example, if we scalarize a definition and one of its uses is vector,
461   /// we build the required vector on-demand with an insertelement sequence
462   /// when visiting the use. Otherwise, if the use is scalar, we can use the
463   /// existing scalar definition.
464   ///
465   /// Return a value in the new loop corresponding to \p V from the original
466   /// loop at unroll index \p Part. If the value has already been vectorized,
467   /// the corresponding vector entry in VectorLoopValueMap is returned. If,
468   /// however, the value has a scalar entry in VectorLoopValueMap, we construct
469   /// a new vector value on-demand by inserting the scalar values into a vector
470   /// with an insertelement sequence. If the value has been neither vectorized
471   /// nor scalarized, it must be loop invariant, so we simply broadcast the
472   /// value into a vector.
473   Value *getOrCreateVectorValue(Value *V, unsigned Part);
474 
475   /// Return a value in the new loop corresponding to \p V from the original
476   /// loop at unroll and vector indices \p Instance. If the value has been
477   /// vectorized but not scalarized, the necessary extractelement instruction
478   /// will be generated.
479   Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);
480 
481   /// Construct the vector value of a scalarized value \p V one lane at a time.
482   void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);
483 
484   /// Try to vectorize interleaved access group \p Group with the base address
485   /// given in \p Addr, optionally masking the vector operations if \p
486   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
487   /// values in the vectorized loop.
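  /// For example, two loads in the same group reading A[2*i] and A[2*i+1] can
  /// be combined into one wide load of 2*VF consecutive elements followed by
  /// shufflevectors that de-interleave the even and odd lanes.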
488   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
489                                 VPTransformState &State, VPValue *Addr,
490                                 VPValue *BlockInMask = nullptr);
491 
492   /// Vectorize Load and Store instructions with the base address given in \p
493   /// Addr, optionally masking the vector operations if \p BlockInMask is
494   /// non-null. Use \p State to translate given VPValues to IR values in the
495   /// vectorized loop.
496   void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
497                                   VPValue *Addr, VPValue *StoredValue,
498                                   VPValue *BlockInMask);
499 
500   /// Set the debug location in the builder using the debug location in
501   /// the instruction.
502   void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);
503 
504   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs();
506 
507 protected:
508   friend class LoopVectorizationPlanner;
509 
510   /// A small list of PHINodes.
511   using PhiVector = SmallVector<PHINode *, 4>;
512 
513   /// A type for scalarized values in the new loop. Each value from the
514   /// original loop, when scalarized, is represented by UF x VF scalar values
515   /// in the new unrolled loop, where UF is the unroll factor and VF is the
516   /// vectorization factor.
517   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
518 
519   /// Set up the values of the IVs correctly when exiting the vector loop.
520   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
521                     Value *CountRoundDown, Value *EndValue,
522                     BasicBlock *MiddleBlock);
523 
524   /// Create a new induction variable inside L.
525   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
526                                    Value *Step, Instruction *DL);
527 
528   /// Handle all cross-iteration phis in the header.
529   void fixCrossIterationPHIs();
530 
531   /// Fix a first-order recurrence. This is the second phase of vectorizing
532   /// this phi node.
533   void fixFirstOrderRecurrence(PHINode *Phi);
534 
535   /// Fix a reduction cross-iteration phi. This is the second phase of
536   /// vectorizing this phi node.
537   void fixReduction(PHINode *Phi);
538 
539   /// Clear NSW/NUW flags from reduction instructions if necessary.
540   void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);
541 
  /// The loop exit block may have single-value PHI nodes with some
  /// incoming value. While vectorizing we only handled real values
  /// that were defined inside the loop, and we should have one value for
  /// each predecessor of its parent basic block. See PR14725.
546   void fixLCSSAPHIs();
547 
548   /// Iteratively sink the scalarized operands of a predicated instruction into
549   /// the block that was created for it.
550   void sinkScalarOperands(Instruction *PredInst);
551 
552   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
553   /// represented as.
554   void truncateToMinimalBitwidths();
555 
  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop-invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// This is needed because each iteration in the loop corresponds to a SIMD
  /// element.
561   virtual Value *getBroadcastInstrs(Value *V);
562 
  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant only for floating-point induction variables.
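  /// For example, at VF = 4 with StartIdx = 0 and Step = 1, a broadcast of the
  /// induction value N becomes the vector <N, N+1, N+2, N+3>.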
566   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
567                                Instruction::BinaryOps Opcode =
568                                Instruction::BinaryOpsEnd);
569 
570   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
571   /// variable on which to base the steps, \p Step is the size of the step, and
572   /// \p EntryVal is the value from the original loop that maps to the steps.
573   /// Note that \p EntryVal doesn't have to be an induction variable - it
574   /// can also be a truncate instruction.
575   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
576                         const InductionDescriptor &ID);
577 
578   /// Create a vector induction phi node based on an existing scalar one. \p
579   /// EntryVal is the value from the original loop that maps to the vector phi
580   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
581   /// truncate instruction, instead of widening the original IV, we widen a
582   /// version of the IV truncated to \p EntryVal's type.
583   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
584                                        Value *Step, Instruction *EntryVal);
585 
586   /// Returns true if an instruction \p I should be scalarized instead of
587   /// vectorized for the chosen vectorization factor.
588   bool shouldScalarizeInstruction(Instruction *I) const;
589 
590   /// Returns true if we should generate a scalar version of \p IV.
591   bool needsScalarInduction(Instruction *IV) const;
592 
593   /// If there is a cast involved in the induction variable \p ID, which should
594   /// be ignored in the vectorized loop body, this function records the
595   /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
596   /// cast. We had already proved that the casted Phi is equal to the uncasted
597   /// Phi in the vectorized loop (under a runtime guard), and therefore
598   /// there is no need to vectorize the cast - the same value can be used in the
599   /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified;
  /// otherwise, \p VectorLoopValue is a widened/vectorized value.
602   ///
603   /// \p EntryVal is the value from the original loop that maps to the vector
604   /// phi node and is used to distinguish what is the IV currently being
605   /// processed - original one (if \p EntryVal is a phi corresponding to the
606   /// original IV) or the "newly-created" one based on the proof mentioned above
607   /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the
608   /// latter case \p EntryVal is a TruncInst and we must not record anything for
609   /// that IV, but it's error-prone to expect callers of this routine to care
610   /// about that, hence this explicit parameter.
611   void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
612                                              const Instruction *EntryVal,
613                                              Value *VectorLoopValue,
614                                              unsigned Part,
615                                              unsigned Lane = UINT_MAX);
616 
617   /// Generate a shuffle sequence that will reverse the vector Vec.
618   virtual Value *reverseVector(Value *Vec);
619 
620   /// Returns (and creates if needed) the original loop trip count.
621   Value *getOrCreateTripCount(Loop *NewLoop);
622 
623   /// Returns (and creates if needed) the trip count of the widened loop.
624   Value *getOrCreateVectorTripCount(Loop *NewLoop);
625 
626   /// Returns a bitcasted value to the requested vector type.
627   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
628   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
629                                 const DataLayout &DL);
630 
631   /// Emit a bypass check to see if the vector trip count is zero, including if
632   /// it overflows.
633   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
634 
635   /// Emit a bypass check to see if all of the SCEV assumptions we've
636   /// had to make are correct.
637   void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
638 
639   /// Emit bypass checks to check any memory assumptions we may have made.
640   void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
641 
642   /// Compute the transformed value of Index at offset StartValue using step
643   /// StepValue.
644   /// For integer induction, returns StartValue + Index * StepValue.
645   /// For pointer induction, returns StartValue[Index * StepValue].
646   /// FIXME: The newly created binary instructions should contain nsw/nuw
647   /// flags, which can be found from the original scalar operations.
648   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
649                               const DataLayout &DL,
650                               const InductionDescriptor &ID) const;
651 
652   /// Add additional metadata to \p To that was not present on \p Orig.
653   ///
654   /// Currently this is used to add the noalias annotations based on the
655   /// inserted memchecks.  Use this for instructions that are *cloned* into the
656   /// vector loop.
657   void addNewMetadata(Instruction *To, const Instruction *Orig);
658 
659   /// Add metadata from one instruction to another.
660   ///
661   /// This includes both the original MDs from \p From and additional ones (\see
662   /// addNewMetadata).  Use this for *newly created* instructions in the vector
663   /// loop.
664   void addMetadata(Instruction *To, Instruction *From);
665 
666   /// Similar to the previous function but it adds the metadata to a
667   /// vector of instructions.
668   void addMetadata(ArrayRef<Value *> To, Instruction *From);
669 
670   /// The original loop.
671   Loop *OrigLoop;
672 
673   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
674   /// dynamic knowledge to simplify SCEV expressions and converts them to a
675   /// more usable form.
676   PredicatedScalarEvolution &PSE;
677 
678   /// Loop Info.
679   LoopInfo *LI;
680 
681   /// Dominator Tree.
682   DominatorTree *DT;
683 
684   /// Alias Analysis.
685   AliasAnalysis *AA;
686 
687   /// Target Library Info.
688   const TargetLibraryInfo *TLI;
689 
690   /// Target Transform Info.
691   const TargetTransformInfo *TTI;
692 
693   /// Assumption Cache.
694   AssumptionCache *AC;
695 
696   /// Interface to emit optimization remarks.
697   OptimizationRemarkEmitter *ORE;
698 
699   /// LoopVersioning.  It's only set up (non-null) if memchecks were
700   /// used.
701   ///
702   /// This is currently only used to add no-alias metadata based on the
  /// memchecks.  The actual versioning is performed manually.
704   std::unique_ptr<LoopVersioning> LVer;
705 
706   /// The vectorization SIMD factor to use. Each vector will have this many
707   /// vector elements.
708   unsigned VF;
709 
710   /// The vectorization unroll factor to use. Each scalar is vectorized to this
711   /// many different vector instructions.
712   unsigned UF;
713 
  /// The builder that we use.
715   IRBuilder<> Builder;
716 
717   // --- Vectorization state ---
718 
719   /// The vector-loop preheader.
720   BasicBlock *LoopVectorPreHeader;
721 
722   /// The scalar-loop preheader.
723   BasicBlock *LoopScalarPreHeader;
724 
  /// The middle block between the vector and the scalar loop.
726   BasicBlock *LoopMiddleBlock;
727 
728   /// The ExitBlock of the scalar loop.
729   BasicBlock *LoopExitBlock;
730 
731   /// The vector loop body.
732   BasicBlock *LoopVectorBody;
733 
734   /// The scalar loop body.
735   BasicBlock *LoopScalarBody;
736 
737   /// A list of all bypass blocks. The first block is the entry of the loop.
738   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
739 
740   /// The new Induction variable which was added to the new block.
741   PHINode *Induction = nullptr;
742 
743   /// The induction variable of the old basic block.
744   PHINode *OldInduction = nullptr;
745 
746   /// Maps values from the original loop to their corresponding values in the
747   /// vectorized loop. A key value can map to either vector values, scalar
748   /// values or both kinds of values, depending on whether the key was
749   /// vectorized and scalarized.
750   VectorizerValueMap VectorLoopValueMap;
751 
752   /// Store instructions that were predicated.
753   SmallVector<Instruction *, 4> PredicatedInstructions;
754 
755   /// Trip count of the original loop.
756   Value *TripCount = nullptr;
757 
758   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
759   Value *VectorTripCount = nullptr;
760 
761   /// The legality analysis.
762   LoopVectorizationLegality *Legal;
763 
  /// The profitability analysis.
765   LoopVectorizationCostModel *Cost;
766 
767   // Record whether runtime checks are added.
768   bool AddedSafetyChecks = false;
769 
770   // Holds the end values for each induction variable. We save the end values
771   // so we can later fix-up the external users of the induction variables.
772   DenseMap<PHINode *, Value *> IVEndValues;
773 
774   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
775   // fixed up at the end of vector code generation.
776   SmallVector<PHINode *, 8> OrigPHIsToFix;
777 };
778 
779 class InnerLoopUnroller : public InnerLoopVectorizer {
780 public:
781   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
782                     LoopInfo *LI, DominatorTree *DT,
783                     const TargetLibraryInfo *TLI,
784                     const TargetTransformInfo *TTI, AssumptionCache *AC,
785                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
786                     LoopVectorizationLegality *LVL,
787                     LoopVectorizationCostModel *CM)
788       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
789                             UnrollFactor, LVL, CM) {}
790 
791 private:
792   Value *getBroadcastInstrs(Value *V) override;
793   Value *getStepVector(Value *Val, int StartIdx, Value *Step,
794                        Instruction::BinaryOps Opcode =
795                        Instruction::BinaryOpsEnd) override;
796   Value *reverseVector(Value *Vec) override;
797 };
798 
799 } // end namespace llvm
800 
/// Look for a meaningful debug location on the instruction or its
802 /// operands.
803 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
804   if (!I)
805     return I;
806 
807   DebugLoc Empty;
808   if (I->getDebugLoc() != Empty)
809     return I;
810 
811   for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
812     if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
813       if (OpInst->getDebugLoc() != Empty)
814         return OpInst;
815   }
816 
817   return I;
818 }
819 
820 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
821   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
822     const DILocation *DIL = Inst->getDebugLoc();
823     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
824         !isa<DbgInfoIntrinsic>(Inst)) {
825       auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(UF * VF);
826       if (NewDIL)
827         B.SetCurrentDebugLocation(NewDIL.getValue());
828       else
829         LLVM_DEBUG(dbgs()
830                    << "Failed to create new discriminator: "
831                    << DIL->getFilename() << " Line: " << DIL->getLine());
832     }
833     else
834       B.SetCurrentDebugLocation(DIL);
835   } else
836     B.SetCurrentDebugLocation(DebugLoc());
837 }
838 
839 /// Write a record \p DebugMsg about vectorization failure to the debug
840 /// output stream. If \p I is passed, it is an instruction that prevents
841 /// vectorization.
842 #ifndef NDEBUG
843 static void debugVectorizationFailure(const StringRef DebugMsg,
844     Instruction *I) {
845   dbgs() << "LV: Not vectorizing: " << DebugMsg;
846   if (I != nullptr)
847     dbgs() << " " << *I;
848   else
849     dbgs() << '.';
850   dbgs() << '\n';
851 }
852 #endif
853 
854 /// Create an analysis remark that explains why vectorization failed
855 ///
856 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
857 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
858 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
859 /// the location of the remark.  \return the remark object that can be
860 /// streamed to.
861 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
862     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
863   Value *CodeRegion = TheLoop->getHeader();
864   DebugLoc DL = TheLoop->getStartLoc();
865 
866   if (I) {
867     CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
870     if (I->getDebugLoc())
871       DL = I->getDebugLoc();
872   }
873 
874   OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
875   R << "loop not vectorized: ";
876   return R;
877 }
878 
879 namespace llvm {
880 
881 void reportVectorizationFailure(const StringRef DebugMsg,
882     const StringRef OREMsg, const StringRef ORETag,
883     OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) {
884   LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
885   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
886   ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
887                 ORETag, TheLoop, I) << OREMsg);
888 }
889 
890 } // end namespace llvm
891 
892 #ifndef NDEBUG
893 /// \return string containing a file name and a line # for the given loop.
894 static std::string getDebugLocString(const Loop *L) {
895   std::string Result;
896   if (L) {
897     raw_string_ostream OS(Result);
898     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
899       LoopDbgLoc.print(OS);
900     else
901       // Just print the module name.
902       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
903     OS.flush();
904   }
905   return Result;
906 }
907 #endif
908 
909 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
910                                          const Instruction *Orig) {
911   // If the loop was versioned with memchecks, add the corresponding no-alias
912   // metadata.
913   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
914     LVer->annotateInstWithNoAlias(To, Orig);
915 }
916 
917 void InnerLoopVectorizer::addMetadata(Instruction *To,
918                                       Instruction *From) {
919   propagateMetadata(To, From);
920   addNewMetadata(To, From);
921 }
922 
923 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
924                                       Instruction *From) {
925   for (Value *V : To) {
926     if (Instruction *I = dyn_cast<Instruction>(V))
927       addMetadata(I, From);
928   }
929 }
930 
931 namespace llvm {
932 
// Loop vectorization cost-model hints for how the scalar epilogue loop should
// be lowered.
935 enum ScalarEpilogueLowering {
936 
937   // The default: allowing scalar epilogues.
938   CM_ScalarEpilogueAllowed,
939 
940   // Vectorization with OptForSize: don't allow epilogues.
941   CM_ScalarEpilogueNotAllowedOptSize,
942 
  // A special case of vectorization with OptForSize: loops with a very small
944   // trip count are considered for vectorization under OptForSize, thereby
945   // making sure the cost of their loop body is dominant, free of runtime
946   // guards and scalar iteration overheads.
947   CM_ScalarEpilogueNotAllowedLowTripLoop,
948 
949   // Loop hint predicate indicating an epilogue is undesired.
950   CM_ScalarEpilogueNotNeededUsePredicate
951 };
952 
953 /// LoopVectorizationCostModel - estimates the expected speedups due to
954 /// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the
957 /// expected speedup/slowdowns due to the supported instruction set. We use the
958 /// TargetTransformInfo to query the different backends for the cost of
959 /// different operations.
960 class LoopVectorizationCostModel {
961 public:
962   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
963                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
964                              LoopVectorizationLegality *Legal,
965                              const TargetTransformInfo &TTI,
966                              const TargetLibraryInfo *TLI, DemandedBits *DB,
967                              AssumptionCache *AC,
968                              OptimizationRemarkEmitter *ORE, const Function *F,
969                              const LoopVectorizeHints *Hints,
970                              InterleavedAccessInfo &IAI)
971       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
972         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
973         Hints(Hints), InterleaveInfo(IAI) {}
974 
975   /// \return An upper bound for the vectorization factor, or None if
976   /// vectorization and interleaving should be avoided up front.
977   Optional<unsigned> computeMaxVF();
978 
979   /// \return True if runtime checks are required for vectorization, and false
980   /// otherwise.
981   bool runtimeChecksRequired();
982 
983   /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not zero
985   /// then this vectorization factor will be selected if vectorization is
986   /// possible.
987   VectorizationFactor selectVectorizationFactor(unsigned MaxVF);
988 
989   /// Setup cost-based decisions for user vectorization factor.
990   void selectUserVectorizationFactor(unsigned UserVF) {
991     collectUniformsAndScalars(UserVF);
992     collectInstsToScalarize(UserVF);
993   }
994 
995   /// \return The size (in bits) of the smallest and widest types in the code
996   /// that needs to be vectorized. We ignore values that remain scalar such as
997   /// 64 bit loop indices.
998   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
999 
1000   /// \return The desired interleave count.
1001   /// If interleave count has been specified by metadata it will be returned.
1002   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1003   /// are the selected vectorization factor and the cost of the selected VF.
1004   unsigned selectInterleaveCount(unsigned VF, unsigned LoopCost);
1005 
  /// A memory access instruction may be vectorized in more than one way; the
  /// form of the instruction after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
1013   void setCostBasedWideningDecision(unsigned VF);
1014 
1015   /// A struct that represents some properties of the register usage
1016   /// of a loop.
1017   struct RegisterUsage {
1018     /// Holds the number of loop invariant values that are used in the loop.
1019     /// The key is ClassID of target-provided register class.
1020     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1021     /// Holds the maximum number of concurrent live intervals in the loop.
1022     /// The key is ClassID of target-provided register class.
1023     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1024   };
1025 
  /// \return Information about the register usage of the loop for the
  /// given vectorization factors.
1028   SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1029 
1030   /// Collect values we want to ignore in the cost model.
1031   void collectValuesToIgnore();
1032 
1033   /// \returns The smallest bitwidth each instruction can be represented with.
1034   /// The vector equivalents of these instructions should be truncated to this
1035   /// type.
1036   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1037     return MinBWs;
1038   }
1039 
1040   /// \returns True if it is more profitable to scalarize instruction \p I for
1041   /// vectorization factor \p VF.
1042   bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
    assert(VF > 1 && "Scalarization profitability relevant only for VF > 1.");
1044 
1045     // Cost model is not run in the VPlan-native path - return conservative
1046     // result until this changes.
1047     if (EnableVPlanNativePath)
1048       return false;
1049 
1050     auto Scalars = InstsToScalarize.find(VF);
1051     assert(Scalars != InstsToScalarize.end() &&
1052            "VF not yet analyzed for scalarization profitability");
1053     return Scalars->second.find(I) != Scalars->second.end();
1054   }
1055 
1056   /// Returns true if \p I is known to be uniform after vectorization.
1057   bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
1058     if (VF == 1)
1059       return true;
1060 
1061     // Cost model is not run in the VPlan-native path - return conservative
1062     // result until this changes.
1063     if (EnableVPlanNativePath)
1064       return false;
1065 
1066     auto UniformsPerVF = Uniforms.find(VF);
1067     assert(UniformsPerVF != Uniforms.end() &&
1068            "VF not yet analyzed for uniformity");
1069     return UniformsPerVF->second.find(I) != UniformsPerVF->second.end();
1070   }
1071 
1072   /// Returns true if \p I is known to be scalar after vectorization.
1073   bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
1074     if (VF == 1)
1075       return true;
1076 
1077     // Cost model is not run in the VPlan-native path - return conservative
1078     // result until this changes.
1079     if (EnableVPlanNativePath)
1080       return false;
1081 
1082     auto ScalarsPerVF = Scalars.find(VF);
1083     assert(ScalarsPerVF != Scalars.end() &&
1084            "Scalar values are not calculated for VF");
1085     return ScalarsPerVF->second.find(I) != ScalarsPerVF->second.end();
1086   }
1087 
1088   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1089   /// for vectorization factor \p VF.
1090   bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
1091     return VF > 1 && MinBWs.find(I) != MinBWs.end() &&
1092            !isProfitableToScalarize(I, VF) &&
1093            !isScalarAfterVectorization(I, VF);
1094   }
1095 
  /// Decision that was taken during cost calculation for a memory instruction.
1097   enum InstWidening {
1098     CM_Unknown,
1099     CM_Widen,         // For consecutive accesses with stride +1.
1100     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1101     CM_Interleave,
1102     CM_GatherScatter,
1103     CM_Scalarize
1104   };
1105 
1106   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1107   /// instruction \p I and vector width \p VF.
1108   void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
1109                            unsigned Cost) {
1110     assert(VF >= 2 && "Expected VF >=2");
1111     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1112   }
1113 
1114   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1115   /// interleaving group \p Grp and vector width \p VF.
1116   void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF,
1117                            InstWidening W, unsigned Cost) {
1118     assert(VF >= 2 && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
1120     /// But the cost will be assigned to one instruction only.
1121     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1122       if (auto *I = Grp->getMember(i)) {
1123         if (Grp->getInsertPos() == I)
1124           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1125         else
1126           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1127       }
1128     }
1129   }
1130 
1131   /// Return the cost model decision for the given instruction \p I and vector
1132   /// width \p VF. Return CM_Unknown if this instruction did not pass
1133   /// through the cost modeling.
1134   InstWidening getWideningDecision(Instruction *I, unsigned VF) {
1135     assert(VF >= 2 && "Expected VF >=2");
1136 
1137     // Cost model is not run in the VPlan-native path - return conservative
1138     // result until this changes.
1139     if (EnableVPlanNativePath)
1140       return CM_GatherScatter;
1141 
1142     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1143     auto Itr = WideningDecisions.find(InstOnVF);
1144     if (Itr == WideningDecisions.end())
1145       return CM_Unknown;
1146     return Itr->second.first;
1147   }
1148 
1149   /// Return the vectorization cost for the given instruction \p I and vector
1150   /// width \p VF.
1151   unsigned getWideningCost(Instruction *I, unsigned VF) {
1152     assert(VF >= 2 && "Expected VF >=2");
1153     std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
1154     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1155            "The cost is not calculated");
1156     return WideningDecisions[InstOnVF].second;
1157   }
1158 
1159   /// Return True if instruction \p I is an optimizable truncate whose operand
1160   /// is an induction variable. Such a truncate will be removed by adding a new
1161   /// induction variable with the destination type.
1162   bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
1163     // If the instruction is not a truncate, return false.
1164     auto *Trunc = dyn_cast<TruncInst>(I);
1165     if (!Trunc)
1166       return false;
1167 
1168     // Get the source and destination types of the truncate.
1169     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1170     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1171 
1172     // If the truncate is free for the given types, return false. Replacing a
1173     // free truncate with an induction variable would add an induction variable
1174     // update instruction to each iteration of the loop. We exclude from this
1175     // check the primary induction variable since it will need an update
1176     // instruction regardless.
1177     Value *Op = Trunc->getOperand(0);
1178     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1179       return false;
1180 
1181     // If the truncated value is not an induction variable, return false.
1182     return Legal->isInductionPhi(Op);
1183   }
1184 
1185   /// Collects the instructions to scalarize for each predicated instruction in
1186   /// the loop.
1187   void collectInstsToScalarize(unsigned VF);
1188 
1189   /// Collect Uniform and Scalar values for the given \p VF.
1190   /// The sets depend on CM decision for Load/Store instructions
1191   /// that may be vectorized as interleave, gather-scatter or scalarized.
1192   void collectUniformsAndScalars(unsigned VF) {
1193     // Do the analysis once.
1194     if (VF == 1 || Uniforms.find(VF) != Uniforms.end())
1195       return;
1196     setCostBasedWideningDecision(VF);
1197     collectLoopUniforms(VF);
1198     collectLoopScalars(VF);
1199   }
1200 
1201   /// Returns true if the target machine supports masked store operation
1202   /// for the given \p DataType and kind of access to \p Ptr.
1203   bool isLegalMaskedStore(Type *DataType, Value *Ptr, MaybeAlign Alignment) {
1204     return Legal->isConsecutivePtr(Ptr) &&
1205            TTI.isLegalMaskedStore(DataType, Alignment);
1206   }
1207 
1208   /// Returns true if the target machine supports masked load operation
1209   /// for the given \p DataType and kind of access to \p Ptr.
1210   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, MaybeAlign Alignment) {
1211     return Legal->isConsecutivePtr(Ptr) &&
1212            TTI.isLegalMaskedLoad(DataType, Alignment);
1213   }
1214 
1215   /// Returns true if the target machine supports masked scatter operation
1216   /// for the given \p DataType.
1217   bool isLegalMaskedScatter(Type *DataType, MaybeAlign Alignment) {
1218     return TTI.isLegalMaskedScatter(DataType, Alignment);
1219   }
1220 
1221   /// Returns true if the target machine supports masked gather operation
1222   /// for the given \p DataType.
1223   bool isLegalMaskedGather(Type *DataType, MaybeAlign Alignment) {
1224     return TTI.isLegalMaskedGather(DataType, Alignment);
1225   }
1226 
1227   /// Returns true if the target machine can represent \p V as a masked gather
1228   /// or scatter operation.
1229   bool isLegalGatherOrScatter(Value *V) {
1230     bool LI = isa<LoadInst>(V);
1231     bool SI = isa<StoreInst>(V);
1232     if (!LI && !SI)
1233       return false;
1234     auto *Ty = getMemInstValueType(V);
1235     MaybeAlign Align = getLoadStoreAlignment(V);
1236     return (LI && isLegalMaskedGather(Ty, Align)) ||
1237            (SI && isLegalMaskedScatter(Ty, Align));
1238   }
1239 
1240   /// Returns true if \p I is an instruction that will be scalarized with
1241   /// predication. Such instructions include conditional stores and
1242   /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
1245   bool isScalarWithPredication(Instruction *I, unsigned VF = 1);
1246 
1247   // Returns true if \p I is an instruction that will be predicated either
1248   // through scalar predication or masked load/store or masked gather/scatter.
1249   // Superset of instructions that return true for isScalarWithPredication.
1250   bool isPredicatedInst(Instruction *I) {
1251     if (!blockNeedsPredication(I->getParent()))
1252       return false;
1253     // Loads and stores that need some form of masked operation are predicated
1254     // instructions.
1255     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1256       return Legal->isMaskRequired(I);
1257     return isScalarWithPredication(I);
1258   }
1259 
1260   /// Returns true if \p I is a memory instruction with consecutive memory
1261   /// access that can be widened.
1262   bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);
1263 
1264   /// Returns true if \p I is a memory instruction in an interleaved-group
1265   /// of memory accesses that can be vectorized with wide vector loads/stores
1266   /// and shuffles.
1267   bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1);
1268 
1269   /// Check if \p Instr belongs to any interleaved access group.
1270   bool isAccessInterleaved(Instruction *Instr) {
1271     return InterleaveInfo.isInterleaved(Instr);
1272   }
1273 
1274   /// Get the interleaved access group that \p Instr belongs to.
1275   const InterleaveGroup<Instruction> *
1276   getInterleavedAccessGroup(Instruction *Instr) {
1277     return InterleaveInfo.getInterleaveGroup(Instr);
1278   }
1279 
1280   /// Returns true if an interleaved group requires a scalar iteration
1281   /// to handle accesses with gaps, and there is nothing preventing us from
1282   /// creating a scalar epilogue.
1283   bool requiresScalarEpilogue() const {
1284     return isScalarEpilogueAllowed() && InterleaveInfo.requiresScalarEpilogue();
1285   }
1286 
  /// Returns true if a scalar epilogue is allowed, i.e. it has not been
  /// disallowed by optsize or a loop hint annotation.
1289   bool isScalarEpilogueAllowed() const {
1290     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1291   }
1292 
  /// Returns true if all loop blocks should be masked in order to fold the
  /// tail of the loop.
1294   bool foldTailByMasking() const { return FoldTailByMasking; }
1295 
1296   bool blockNeedsPredication(BasicBlock *BB) {
1297     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1298   }
1299 
1300   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1301   /// with factor VF.  Return the cost of the instruction, including
1302   /// scalarization overhead if it's needed.
1303   unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF);
1304 
1305   /// Estimate cost of a call instruction CI if it were vectorized with factor
1306   /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized - i.e. either a vector version isn't available or it is too
  /// expensive.
1310   unsigned getVectorCallCost(CallInst *CI, unsigned VF, bool &NeedToScalarize);
1311 
1312 private:
1313   unsigned NumPredStores = 0;
1314 
1315   /// \return An upper bound for the vectorization factor, larger than zero.
1316   /// One is returned if vectorization should best be avoided due to cost.
1317   unsigned computeFeasibleMaxVF(unsigned ConstTripCount);
1318 
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
1326   using VectorizationCostTy = std::pair<unsigned, bool>;
1327 
1328   /// Returns the expected execution cost. The unit of the cost does
1329   /// not matter because we use the 'cost' units to compare different
1330   /// vector widths. The cost that is returned is *not* normalized by
1331   /// the factor width.
1332   VectorizationCostTy expectedCost(unsigned VF);
1333 
1334   /// Returns the execution time cost of an instruction for a given vector
1335   /// width. Vector width of one means scalar.
1336   VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
1337 
1338   /// The cost-computation logic from getInstructionCost which provides
1339   /// the vector type as an output parameter.
1340   unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
1341 
1342   /// Calculate vectorization cost of memory instruction \p I.
1343   unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);
1344 
1345   /// The cost computation for scalarized memory instruction.
1346   unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);
1347 
1348   /// The cost computation for interleaving group of memory instructions.
1349   unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);
1350 
1351   /// The cost computation for Gather/Scatter instruction.
1352   unsigned getGatherScatterCost(Instruction *I, unsigned VF);
1353 
1354   /// The cost computation for widening instruction \p I with consecutive
1355   /// memory access.
1356   unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
1357 
1358   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1359   /// Load: scalar load + broadcast.
1360   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1361   /// element)
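  /// For illustration only: a load from a pointer that is uniform across the
  /// vector iteration is costed as one scalar load plus one broadcast, rather
  /// than as VF independent scalar loads.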
1362   unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
1363 
1364   /// Estimate the overhead of scalarizing an instruction. This is a
1365   /// convenience wrapper for the type-based getScalarizationOverhead API.
1366   unsigned getScalarizationOverhead(Instruction *I, unsigned VF);
1367 
  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
1370   bool isConsecutiveLoadOrStore(Instruction *I);
1371 
1372   /// Returns true if an artificially high cost for emulated masked memrefs
1373   /// should be used.
1374   bool useEmulatedMaskMemRefHack(Instruction *I);
1375 
1376   /// Map of scalar integer values to the smallest bitwidth they can be legally
1377   /// represented as. The vector equivalents of these values should be truncated
1378   /// to this type.
1379   MapVector<Instruction *, uint64_t> MinBWs;
1380 
1381   /// A type representing the costs for instructions if they were to be
1382   /// scalarized rather than vectorized. The entries are Instruction-Cost
1383   /// pairs.
1384   using ScalarCostsTy = DenseMap<Instruction *, unsigned>;
1385 
  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
1388   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1389 
1390   /// Records whether it is allowed to have the original scalar loop execute at
1391   /// least once. This may be needed as a fallback loop in case runtime
1392   /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or isn't divisible by the VF,
1394   /// or as a peel-loop to handle gaps in interleave-groups.
1395   /// Under optsize and when the trip count is very small we don't allow any
1396   /// iterations to execute in the scalar loop.
1397   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1398 
  /// All blocks of the loop are to be masked in order to fold the tail of the
  /// scalar iterations.
1400   bool FoldTailByMasking = false;
1401 
1402   /// A map holding scalar costs for different vectorization factors. The
1403   /// presence of a cost for an instruction in the mapping indicates that the
1404   /// instruction will be scalarized when vectorizing with the associated
1405   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1406   DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;
1407 
1408   /// Holds the instructions known to be uniform after vectorization.
1409   /// The data is collected per VF.
1410   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;
1411 
1412   /// Holds the instructions known to be scalar after vectorization.
1413   /// The data is collected per VF.
1414   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;
1415 
1416   /// Holds the instructions (address computations) that are forced to be
1417   /// scalarized.
1418   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1419 
1420   /// Returns the expected difference in cost from scalarizing the expression
1421   /// feeding a predicated instruction \p PredInst. The instructions to
1422   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1423   /// non-negative return value implies the expression will be scalarized.
1424   /// Currently, only single-use chains are considered for scalarization.
1425   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1426                               unsigned VF);
1427 
1428   /// Collect the instructions that are uniform after vectorization. An
1429   /// instruction is uniform if we represent it with a single scalar value in
1430   /// the vectorized loop corresponding to each vector iteration. Examples of
1431   /// uniform instructions include pointer operands of consecutive or
1432   /// interleaved memory accesses. Note that although uniformity implies an
1433   /// instruction will be scalar, the reverse is not true. In general, a
1434   /// scalarized instruction will be represented by VF scalar values in the
1435   /// vectorized loop, each corresponding to an iteration of the original
1436   /// scalar loop.
1437   void collectLoopUniforms(unsigned VF);
1438 
1439   /// Collect the instructions that are scalar after vectorization. An
1440   /// instruction is scalar if it is known to be uniform or will be scalarized
1441   /// during vectorization. Non-uniform scalarized instructions will be
1442   /// represented by VF values in the vectorized loop, each corresponding to an
1443   /// iteration of the original scalar loop.
1444   void collectLoopScalars(unsigned VF);
1445 
1446   /// Keeps cost model vectorization decision and cost for instructions.
1447   /// Right now it is used for memory instructions only.
1448   using DecisionList = DenseMap<std::pair<Instruction *, unsigned>,
1449                                 std::pair<InstWidening, unsigned>>;
1450 
1451   DecisionList WideningDecisions;
1452 
1453   /// Returns true if \p V is expected to be vectorized and it needs to be
1454   /// extracted.
1455   bool needsExtract(Value *V, unsigned VF) const {
1456     Instruction *I = dyn_cast<Instruction>(V);
1457     if (VF == 1 || !I || !TheLoop->contains(I) || TheLoop->isLoopInvariant(I))
1458       return false;
1459 
1460     // Assume we can vectorize V (and hence we need extraction) if the
    // scalars are not computed yet. This can happen because it is called
1462     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1463     // the scalars are collected. That should be a safe assumption in most
1464     // cases, because we check if the operands have vectorizable types
1465     // beforehand in LoopVectorizationLegality.
1466     return Scalars.find(VF) == Scalars.end() ||
1467            !isScalarAfterVectorization(I, VF);
1468   };
1469 
1470   /// Returns a range containing only operands needing to be extracted.
1471   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1472                                                    unsigned VF) {
1473     return SmallVector<Value *, 4>(make_filter_range(
1474         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1475   }
1476 
1477 public:
1478   /// The loop that we evaluate.
1479   Loop *TheLoop;
1480 
1481   /// Predicated scalar evolution analysis.
1482   PredicatedScalarEvolution &PSE;
1483 
1484   /// Loop Info analysis.
1485   LoopInfo *LI;
1486 
1487   /// Vectorization legality.
1488   LoopVectorizationLegality *Legal;
1489 
1490   /// Vector target information.
1491   const TargetTransformInfo &TTI;
1492 
1493   /// Target Library Info.
1494   const TargetLibraryInfo *TLI;
1495 
1496   /// Demanded bits analysis.
1497   DemandedBits *DB;
1498 
1499   /// Assumption cache.
1500   AssumptionCache *AC;
1501 
1502   /// Interface to emit optimization remarks.
1503   OptimizationRemarkEmitter *ORE;
1504 
1505   const Function *TheFunction;
1506 
1507   /// Loop Vectorize Hint.
1508   const LoopVectorizeHints *Hints;
1509 
  /// The interleave access information contains groups of interleaved accesses
  /// that have the same stride and are close to each other.
1512   InterleavedAccessInfo &InterleaveInfo;
1513 
1514   /// Values to ignore in the cost model.
1515   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1516 
1517   /// Values to ignore in the cost model when VF > 1.
1518   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1519 };
1520 
1521 } // end namespace llvm
1522 
1523 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
1524 // vectorization. The loop needs to be annotated with #pragma omp simd
1525 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
1526 // vector length information is not provided, vectorization is not considered
1527 // explicit. Interleave hints are not allowed either. These limitations will be
1528 // relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
1530 // vectorize' semantics. This pragma provides *auto-vectorization hints*
1531 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
1532 // provides *explicit vectorization hints* (LV can bypass legal checks and
1533 // assume that vectorization is legal). However, both hints are implemented
1534 // using the same metadata (llvm.loop.vectorize, processed by
1535 // LoopVectorizeHints). This will be fixed in the future when the native IR
1536 // representation for pragma 'omp simd' is introduced.
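// For illustration only (assuming the pragma is lowered to the usual
// llvm.loop.vectorize metadata), an outer loop annotated like
//   #pragma omp simd simdlen(4)
//   for (int i = 0; i < N; ++i)
//     for (int j = 0; j < M; ++j)
//       A[i][j] += B[i][j];
// carries both the force hint and the vector length this function looks for.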
1537 static bool isExplicitVecOuterLoop(Loop *OuterLp,
1538                                    OptimizationRemarkEmitter *ORE) {
1539   assert(!OuterLp->empty() && "This is not an outer loop");
1540   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
1541 
1542   // Only outer loops with an explicit vectorization hint are supported.
1543   // Unannotated outer loops are ignored.
1544   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
1545     return false;
1546 
1547   Function *Fn = OuterLp->getHeader()->getParent();
1548   if (!Hints.allowVectorization(Fn, OuterLp,
1549                                 true /*VectorizeOnlyWhenForced*/)) {
1550     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
1551     return false;
1552   }
1553 
1554   if (Hints.getInterleave() > 1) {
1555     // TODO: Interleave support is future work.
1556     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
1557                          "outer loops.\n");
1558     Hints.emitRemarkWithHints();
1559     return false;
1560   }
1561 
1562   return true;
1563 }
1564 
1565 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
1566                                   OptimizationRemarkEmitter *ORE,
1567                                   SmallVectorImpl<Loop *> &V) {
1568   // Collect inner loops and outer loops without irreducible control flow. For
1569   // now, only collect outer loops that have explicit vectorization hints. If we
1570   // are stress testing the VPlan H-CFG construction, we collect the outermost
1571   // loop of every loop nest.
1572   if (L.empty() || VPlanBuildStressTest ||
1573       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
1574     LoopBlocksRPO RPOT(&L);
1575     RPOT.perform(LI);
1576     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
1577       V.push_back(&L);
1578       // TODO: Collect inner loops inside marked outer loops in case
1579       // vectorization fails for the outer loop. Do not invoke
1580       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
1581       // already known to be reducible. We can use an inherited attribute for
1582       // that.
1583       return;
1584     }
1585   }
1586   for (Loop *InnerL : L)
1587     collectSupportedLoops(*InnerL, LI, ORE, V);
1588 }
1589 
1590 namespace {
1591 
1592 /// The LoopVectorize Pass.
1593 struct LoopVectorize : public FunctionPass {
1594   /// Pass identification, replacement for typeid
1595   static char ID;
1596 
1597   LoopVectorizePass Impl;
1598 
1599   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
1600                          bool VectorizeOnlyWhenForced = false)
1601       : FunctionPass(ID) {
1602     Impl.InterleaveOnlyWhenForced = InterleaveOnlyWhenForced;
1603     Impl.VectorizeOnlyWhenForced = VectorizeOnlyWhenForced;
1604     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
1605   }
1606 
1607   bool runOnFunction(Function &F) override {
1608     if (skipFunction(F))
1609       return false;
1610 
1611     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1612     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1613     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1614     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1615     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
1616     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
1617     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
1618     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1619     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1620     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
1621     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
1622     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
1623     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
1624 
1625     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
1626         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
1627 
1628     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
1629                         GetLAA, *ORE, PSI);
1630   }
1631 
1632   void getAnalysisUsage(AnalysisUsage &AU) const override {
1633     AU.addRequired<AssumptionCacheTracker>();
1634     AU.addRequired<BlockFrequencyInfoWrapperPass>();
1635     AU.addRequired<DominatorTreeWrapperPass>();
1636     AU.addRequired<LoopInfoWrapperPass>();
1637     AU.addRequired<ScalarEvolutionWrapperPass>();
1638     AU.addRequired<TargetTransformInfoWrapperPass>();
1639     AU.addRequired<AAResultsWrapperPass>();
1640     AU.addRequired<LoopAccessLegacyAnalysis>();
1641     AU.addRequired<DemandedBitsWrapperPass>();
1642     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
1643     AU.addRequired<InjectTLIMappingsLegacy>();
1644 
1645     // We currently do not preserve loopinfo/dominator analyses with outer loop
1646     // vectorization. Until this is addressed, mark these analyses as preserved
1647     // only for non-VPlan-native path.
1648     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
1649     if (!EnableVPlanNativePath) {
1650       AU.addPreserved<LoopInfoWrapperPass>();
1651       AU.addPreserved<DominatorTreeWrapperPass>();
1652     }
1653 
1654     AU.addPreserved<BasicAAWrapperPass>();
1655     AU.addPreserved<GlobalsAAWrapperPass>();
1656     AU.addRequired<ProfileSummaryInfoWrapperPass>();
1657   }
1658 };
1659 
1660 } // end anonymous namespace
1661 
1662 //===----------------------------------------------------------------------===//
1663 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
1664 // LoopVectorizationCostModel and LoopVectorizationPlanner.
1665 //===----------------------------------------------------------------------===//
1666 
1667 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
1671   Instruction *Instr = dyn_cast<Instruction>(V);
1672   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
1673                      (!Instr ||
1674                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
1675   // Place the code for broadcasting invariant variables in the new preheader.
1676   IRBuilder<>::InsertPointGuard Guard(Builder);
1677   if (SafeToHoist)
1678     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1679 
1680   // Broadcast the scalar into all locations in the vector.
1681   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
1682 
1683   return Shuf;
1684 }
1685 
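// Illustrative example (not tied to a specific test): for an integer
// induction with start 0 and step 1 at VF = 4, the routine below creates a
// vector phi whose preheader value is <0, 1, 2, 3> and whose backedge value
// adds the splat <4, 4, 4, 4> (i.e. VF * Step), once per unroll part.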
1686 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
1687     const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
1688   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1689          "Expected either an induction phi-node or a truncate of it!");
1690   Value *Start = II.getStartValue();
1691 
  // Construct the initial value of the vector IV in the vector loop preheader.
1693   auto CurrIP = Builder.saveIP();
1694   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
1695   if (isa<TruncInst>(EntryVal)) {
1696     assert(Start->getType()->isIntegerTy() &&
1697            "Truncation requires an integer type");
1698     auto *TruncType = cast<IntegerType>(EntryVal->getType());
1699     Step = Builder.CreateTrunc(Step, TruncType);
1700     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
1701   }
1702   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
1703   Value *SteppedStart =
1704       getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
1705 
1706   // We create vector phi nodes for both integer and floating-point induction
1707   // variables. Here, we determine the kind of arithmetic we will perform.
1708   Instruction::BinaryOps AddOp;
1709   Instruction::BinaryOps MulOp;
1710   if (Step->getType()->isIntegerTy()) {
1711     AddOp = Instruction::Add;
1712     MulOp = Instruction::Mul;
1713   } else {
1714     AddOp = II.getInductionOpcode();
1715     MulOp = Instruction::FMul;
1716   }
1717 
1718   // Multiply the vectorization factor by the step using integer or
1719   // floating-point arithmetic as appropriate.
1720   Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF);
1721   Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
1722 
1723   // Create a vector splat to use in the induction update.
1724   //
1725   // FIXME: If the step is non-constant, we create the vector splat with
1726   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
1727   //        handle a constant vector splat.
1728   Value *SplatVF =
1729       isa<Constant>(Mul)
1730           ? ConstantVector::getSplat({VF, false}, cast<Constant>(Mul))
1731           : Builder.CreateVectorSplat(VF, Mul);
1732   Builder.restoreIP(CurrIP);
1733 
1734   // We may need to add the step a number of times, depending on the unroll
1735   // factor. The last of those goes into the PHI.
1736   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
1737                                     &*LoopVectorBody->getFirstInsertionPt());
1738   VecInd->setDebugLoc(EntryVal->getDebugLoc());
1739   Instruction *LastInduction = VecInd;
1740   for (unsigned Part = 0; Part < UF; ++Part) {
1741     VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);
1742 
1743     if (isa<TruncInst>(EntryVal))
1744       addMetadata(LastInduction, EntryVal);
1745     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part);
1746 
1747     LastInduction = cast<Instruction>(addFastMathFlag(
1748         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
1749     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
1750   }
1751 
1752   // Move the last step to the end of the latch block. This ensures consistent
1753   // placement of all induction updates.
1754   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
1755   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
1756   auto *ICmp = cast<Instruction>(Br->getCondition());
1757   LastInduction->moveBefore(ICmp);
1758   LastInduction->setName("vec.ind.next");
1759 
1760   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
1761   VecInd->addIncoming(LastInduction, LoopVectorLatch);
1762 }
1763 
1764 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
1765   return Cost->isScalarAfterVectorization(I, VF) ||
1766          Cost->isProfitableToScalarize(I, VF);
1767 }
1768 
1769 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
1770   if (shouldScalarizeInstruction(IV))
1771     return true;
1772   auto isScalarInst = [&](User *U) -> bool {
1773     auto *I = cast<Instruction>(U);
1774     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
1775   };
1776   return llvm::any_of(IV->users(), isScalarInst);
1777 }
1778 
1779 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
1780     const InductionDescriptor &ID, const Instruction *EntryVal,
1781     Value *VectorLoopVal, unsigned Part, unsigned Lane) {
1782   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1783          "Expected either an induction phi-node or a truncate of it!");
1784 
  // This induction variable is not the phi from the original loop but the
  // newly-created IV, based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
  // re-uses the same InductionDescriptor as the original IV, but we don't have
  // to do any recording in this case - that is done when the original IV is
  // processed.
1791   if (isa<TruncInst>(EntryVal))
1792     return;
1793 
1794   const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
1795   if (Casts.empty())
1796     return;
  // Only the first Cast instruction in the Casts vector is of interest.
  // The rest of the Casts (if any exist) have no uses outside the
  // induction update chain itself.
1800   Instruction *CastInst = *Casts.begin();
1801   if (Lane < UINT_MAX)
1802     VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal);
1803   else
1804     VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal);
1805 }
1806 
1807 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
1808   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
1809          "Primary induction variable must have an integer type");
1810 
1811   auto II = Legal->getInductionVars().find(IV);
1812   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
1813 
1814   auto ID = II->second;
1815   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
1816 
1817   // The value from the original loop to which we are mapping the new induction
1818   // variable.
1819   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
1820 
1821   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
1822 
  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
1825   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
1826     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
1827            "Induction step should be loop invariant");
1828     if (PSE.getSE()->isSCEVable(IV->getType())) {
1829       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
1830       return Exp.expandCodeFor(Step, Step->getType(),
1831                                LoopVectorPreHeader->getTerminator());
1832     }
1833     return cast<SCEVUnknown>(Step)->getValue();
1834   };
1835 
1836   // The scalar value to broadcast. This is derived from the canonical
1837   // induction variable. If a truncation type is given, truncate the canonical
1838   // induction variable and step. Otherwise, derive these values from the
1839   // induction descriptor.
1840   auto CreateScalarIV = [&](Value *&Step) -> Value * {
1841     Value *ScalarIV = Induction;
1842     if (IV != OldInduction) {
1843       ScalarIV = IV->getType()->isIntegerTy()
1844                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
1845                      : Builder.CreateCast(Instruction::SIToFP, Induction,
1846                                           IV->getType());
1847       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
1848       ScalarIV->setName("offset.idx");
1849     }
1850     if (Trunc) {
1851       auto *TruncType = cast<IntegerType>(Trunc->getType());
1852       assert(Step->getType()->isIntegerTy() &&
1853              "Truncation requires an integer step");
1854       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
1855       Step = Builder.CreateTrunc(Step, TruncType);
1856     }
1857     return ScalarIV;
1858   };
1859 
1860   // Create the vector values from the scalar IV, in the absence of creating a
1861   // vector IV.
1862   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
1863     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
1864     for (unsigned Part = 0; Part < UF; ++Part) {
1865       Value *EntryPart =
1866           getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode());
1867       VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart);
1868       if (Trunc)
1869         addMetadata(EntryPart, Trunc);
1870       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part);
1871     }
1872   };
1873 
1874   // Now do the actual transformations, and start with creating the step value.
1875   Value *Step = CreateStepValue(ID.getStep());
1876   if (VF <= 1) {
1877     Value *ScalarIV = CreateScalarIV(Step);
1878     CreateSplatIV(ScalarIV, Step);
1879     return;
1880   }
1881 
1882   // Determine if we want a scalar version of the induction variable. This is
1883   // true if the induction variable itself is not widened, or if it has at
1884   // least one user in the loop that is not widened.
1885   auto NeedsScalarIV = needsScalarInduction(EntryVal);
1886   if (!NeedsScalarIV) {
1887     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
1888     return;
1889   }
1890 
1891   // Try to create a new independent vector induction variable. If we can't
1892   // create the phi node, we will splat the scalar induction variable in each
1893   // loop iteration.
1894   if (!shouldScalarizeInstruction(EntryVal)) {
1895     createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
1896     Value *ScalarIV = CreateScalarIV(Step);
1897     // Create scalar steps that can be used by instructions we will later
1898     // scalarize. Note that the addition of the scalar steps will not increase
1899     // the number of instructions in the loop in the common case prior to
1900     // InstCombine. We will be trading one vector extract for each scalar step.
1901     buildScalarSteps(ScalarIV, Step, EntryVal, ID);
1902     return;
1903   }
1904 
1905   // If we haven't yet vectorized the induction variable, splat the scalar
1906   // induction variable, and build the necessary step vectors.
1907   // TODO: Don't do it unless the vectorized IV is really required.
1908   Value *ScalarIV = CreateScalarIV(Step);
1909   CreateSplatIV(ScalarIV, Step);
1910   buildScalarSteps(ScalarIV, Step, EntryVal, ID);
1911 }
1912 
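// For illustration only: with an integer Step %s, StartIdx = 0 and VF = 4,
// the helper below roughly produces
//   %induction = add %val, mul(<0, 1, 2, 3>, splat(%s))
// so lane i of the result holds Val[i] + (StartIdx + i) * Step.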
1913 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
1914                                           Instruction::BinaryOps BinOp) {
1915   // Create and check the types.
1916   assert(Val->getType()->isVectorTy() && "Must be a vector");
1917   int VLen = Val->getType()->getVectorNumElements();
1918 
1919   Type *STy = Val->getType()->getScalarType();
1920   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
1921          "Induction Step must be an integer or FP");
1922   assert(Step->getType() == STy && "Step has wrong type");
1923 
1924   SmallVector<Constant *, 8> Indices;
1925 
1926   if (STy->isIntegerTy()) {
    // Create a vector of consecutive numbers starting at StartIdx.
1928     for (int i = 0; i < VLen; ++i)
1929       Indices.push_back(ConstantInt::get(STy, StartIdx + i));
1930 
1931     // Add the consecutive indices to the vector value.
1932     Constant *Cv = ConstantVector::get(Indices);
1933     assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
1934     Step = Builder.CreateVectorSplat(VLen, Step);
1935     assert(Step->getType() == Val->getType() && "Invalid step vec");
1936     // FIXME: The newly created binary instructions should contain nsw/nuw flags,
1937     // which can be found from the original scalar operations.
1938     Step = Builder.CreateMul(Cv, Step);
1939     return Builder.CreateAdd(Val, Step, "induction");
1940   }
1941 
1942   // Floating point induction.
1943   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
1944          "Binary Opcode should be specified for FP induction");
  // Create a vector of consecutive numbers starting at StartIdx.
1946   for (int i = 0; i < VLen; ++i)
1947     Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
1948 
1949   // Add the consecutive indices to the vector value.
1950   Constant *Cv = ConstantVector::get(Indices);
1951 
1952   Step = Builder.CreateVectorSplat(VLen, Step);
1953 
1954   // Floating point operations had to be 'fast' to enable the induction.
1955   FastMathFlags Flags;
1956   Flags.setFast();
1957 
1958   Value *MulOp = Builder.CreateFMul(Cv, Step);
1959   if (isa<Instruction>(MulOp))
    // We have to check because MulOp may have been folded to a constant.
1961     cast<Instruction>(MulOp)->setFastMathFlags(Flags);
1962 
1963   Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
1964   if (isa<Instruction>(BOp))
1965     cast<Instruction>(BOp)->setFastMathFlags(Flags);
1966   return BOp;
1967 }
1968 
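// Illustrative example: with UF = 2 and VF = 4, each lane of each unroll part
// below receives ScalarIV + (VF * Part + Lane) * Step; if EntryVal is uniform
// after vectorization, only lane 0 of each part (ScalarIV + 0 * Step and
// ScalarIV + 4 * Step) is materialized.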
1969 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
1970                                            Instruction *EntryVal,
1971                                            const InductionDescriptor &ID) {
1972   // We shouldn't have to build scalar steps if we aren't vectorizing.
1973   assert(VF > 1 && "VF should be greater than one");
1974 
  // Get the value type and ensure it and the step have the same type.
1976   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
1977   assert(ScalarIVTy == Step->getType() &&
1978          "Val and Step should have the same type");
1979 
1980   // We build scalar steps for both integer and floating-point induction
1981   // variables. Here, we determine the kind of arithmetic we will perform.
1982   Instruction::BinaryOps AddOp;
1983   Instruction::BinaryOps MulOp;
1984   if (ScalarIVTy->isIntegerTy()) {
1985     AddOp = Instruction::Add;
1986     MulOp = Instruction::Mul;
1987   } else {
1988     AddOp = ID.getInductionOpcode();
1989     MulOp = Instruction::FMul;
1990   }
1991 
1992   // Determine the number of scalars we need to generate for each unroll
1993   // iteration. If EntryVal is uniform, we only need to generate the first
1994   // lane. Otherwise, we generate all VF values.
1995   unsigned Lanes =
1996       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1
1997                                                                          : VF;
1998   // Compute the scalar steps and save the results in VectorLoopValueMap.
1999   for (unsigned Part = 0; Part < UF; ++Part) {
2000     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2001       auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
2002       auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
2003       auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
2004       VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add);
2005       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane);
2006     }
2007   }
2008 }
2009 
2010 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
2011   assert(V != Induction && "The new induction variable should not be used.");
2012   assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2013   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2014 
2015   // If we have a stride that is replaced by one, do it here. Defer this for
2016   // the VPlan-native path until we start running Legal checks in that path.
2017   if (!EnableVPlanNativePath && Legal->hasStride(V))
2018     V = ConstantInt::get(V->getType(), 1);
2019 
2020   // If we have a vector mapped to this value, return it.
2021   if (VectorLoopValueMap.hasVectorValue(V, Part))
2022     return VectorLoopValueMap.getVectorValue(V, Part);
2023 
2024   // If the value has not been vectorized, check if it has been scalarized
2025   // instead. If it has been scalarized, and we actually need the value in
2026   // vector form, we will construct the vector values on demand.
2027   if (VectorLoopValueMap.hasAnyScalarValue(V)) {
2028     Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0});
2029 
2030     // If we've scalarized a value, that value should be an instruction.
2031     auto *I = cast<Instruction>(V);
2032 
2033     // If we aren't vectorizing, we can just copy the scalar map values over to
2034     // the vector map.
2035     if (VF == 1) {
2036       VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
2037       return ScalarValue;
2038     }
2039 
2040     // Get the last scalar instruction we generated for V and Part. If the value
2041     // is known to be uniform after vectorization, this corresponds to lane zero
2042     // of the Part unroll iteration. Otherwise, the last instruction is the one
2043     // we created for the last vector lane of the Part unroll iteration.
2044     unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
2045     auto *LastInst = cast<Instruction>(
2046         VectorLoopValueMap.getScalarValue(V, {Part, LastLane}));
2047 
2048     // Set the insert point after the last scalarized instruction. This ensures
2049     // the insertelement sequence will directly follow the scalar definitions.
2050     auto OldIP = Builder.saveIP();
2051     auto NewIP = std::next(BasicBlock::iterator(LastInst));
2052     Builder.SetInsertPoint(&*NewIP);
2053 
2054     // However, if we are vectorizing, we need to construct the vector values.
2055     // If the value is known to be uniform after vectorization, we can just
2056     // broadcast the scalar value corresponding to lane zero for each unroll
2057     // iteration. Otherwise, we construct the vector values using insertelement
2058     // instructions. Since the resulting vectors are stored in
2059     // VectorLoopValueMap, we will only generate the insertelements once.
2060     Value *VectorValue = nullptr;
2061     if (Cost->isUniformAfterVectorization(I, VF)) {
2062       VectorValue = getBroadcastInstrs(ScalarValue);
2063       VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
2064     } else {
2065       // Initialize packing with insertelements to start from undef.
2066       Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF));
2067       VectorLoopValueMap.setVectorValue(V, Part, Undef);
2068       for (unsigned Lane = 0; Lane < VF; ++Lane)
2069         packScalarIntoVectorValue(V, {Part, Lane});
2070       VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
2071     }
2072     Builder.restoreIP(OldIP);
2073     return VectorValue;
2074   }
2075 
2076   // If this scalar is unknown, assume that it is a constant or that it is
2077   // loop invariant. Broadcast V and save the value for future uses.
2078   Value *B = getBroadcastInstrs(V);
2079   VectorLoopValueMap.setVectorValue(V, Part, B);
2080   return B;
2081 }
2082 
2083 Value *
2084 InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
2085                                             const VPIteration &Instance) {
2086   // If the value is not an instruction contained in the loop, it should
2087   // already be scalar.
2088   if (OrigLoop->isLoopInvariant(V))
2089     return V;
2090 
2091   assert(Instance.Lane > 0
2092              ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)
2093              : true && "Uniform values only have lane zero");
2094 
2095   // If the value from the original loop has not been vectorized, it is
2096   // represented by UF x VF scalar values in the new loop. Return the requested
2097   // scalar value.
2098   if (VectorLoopValueMap.hasScalarValue(V, Instance))
2099     return VectorLoopValueMap.getScalarValue(V, Instance);
2100 
2101   // If the value has not been scalarized, get its entry in VectorLoopValueMap
2102   // for the given unroll part. If this entry is not a vector type (i.e., the
2103   // vectorization factor is one), there is no need to generate an
2104   // extractelement instruction.
2105   auto *U = getOrCreateVectorValue(V, Instance.Part);
2106   if (!U->getType()->isVectorTy()) {
2107     assert(VF == 1 && "Value not scalarized has non-vector type");
2108     return U;
2109   }
2110 
2111   // Otherwise, the value from the original loop has been vectorized and is
2112   // represented by UF vector values. Extract and return the requested scalar
2113   // value from the appropriate vector lane.
2114   return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
2115 }
2116 
2117 void InnerLoopVectorizer::packScalarIntoVectorValue(
2118     Value *V, const VPIteration &Instance) {
2119   assert(V != Induction && "The new induction variable should not be used.");
2120   assert(!V->getType()->isVectorTy() && "Can't pack a vector");
2121   assert(!V->getType()->isVoidTy() && "Type does not produce a value");
2122 
2123   Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
2124   Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
2125   VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
2126                                             Builder.getInt32(Instance.Lane));
2127   VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
2128 }
2129 
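// For illustration only: at VF = 4 the helper below emits a shufflevector
// with mask <3, 2, 1, 0>, turning <a, b, c, d> into <d, c, b, a>.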
2130 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2131   assert(Vec->getType()->isVectorTy() && "Invalid type");
2132   SmallVector<Constant *, 8> ShuffleMask;
2133   for (unsigned i = 0; i < VF; ++i)
2134     ShuffleMask.push_back(Builder.getInt32(VF - i - 1));
2135 
2136   return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
2137                                      ConstantVector::get(ShuffleMask),
2138                                      "reverse");
2139 }
2140 
2141 // Return whether we allow using masked interleave-groups (for dealing with
2142 // strided loads/stores that reside in predicated blocks, or for dealing
2143 // with gaps).
2144 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2145   // If an override option has been passed in for interleaved accesses, use it.
2146   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2147     return EnableMaskedInterleavedMemAccesses;
2148 
2149   return TTI.enableMaskedInterleavedAccessVectorization();
2150 }
2151 
2152 // Try to vectorize the interleave group that \p Instr belongs to.
2153 //
2154 // E.g. Translate following interleaved load group (factor = 3):
2155 //   for (i = 0; i < N; i+=3) {
2156 //     R = Pic[i];             // Member of index 0
2157 //     G = Pic[i+1];           // Member of index 1
2158 //     B = Pic[i+2];           // Member of index 2
2159 //     ... // do something to R, G, B
2160 //   }
2161 // To:
2162 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2163 //   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
2164 //   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
2165 //   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
2166 //
2167 // Or translate following interleaved store group (factor = 3):
2168 //   for (i = 0; i < N; i+=3) {
2169 //     ... do something to R, G, B
2170 //     Pic[i]   = R;           // Member of index 0
2171 //     Pic[i+1] = G;           // Member of index 1
2172 //     Pic[i+2] = B;           // Member of index 2
2173 //   }
2174 // To:
2175 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2176 //   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2177 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2178 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2179 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2180 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2181     const InterleaveGroup<Instruction> *Group, VPTransformState &State,
2182     VPValue *Addr, VPValue *BlockInMask) {
2183   Instruction *Instr = Group->getInsertPos();
2184   const DataLayout &DL = Instr->getModule()->getDataLayout();
2185 
2186   // Prepare for the vector type of the interleaved load/store.
2187   Type *ScalarTy = getMemInstValueType(Instr);
2188   unsigned InterleaveFactor = Group->getFactor();
2189   Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
2190 
2191   // Prepare for the new pointers.
2192   SmallVector<Value *, 2> AddrParts;
2193   unsigned Index = Group->getIndex(Instr);
2194 
2195   // TODO: extend the masked interleaved-group support to reversed access.
2196   assert((!BlockInMask || !Group->isReverse()) &&
2197          "Reversed masked interleave-group not supported.");
2198 
2199   // If the group is reverse, adjust the index to refer to the last vector lane
2200   // instead of the first. We adjust the index from the first vector lane,
2201   // rather than directly getting the pointer for lane VF - 1, because the
2202   // pointer operand of the interleaved access is supposed to be uniform. For
2203   // uniform instructions, we're only required to generate a value for the
2204   // first vector lane in each unroll iteration.
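  // For illustration only: with a reversed group of factor 2 at VF = 4 whose
  // insert position is the member of index 0, Index becomes 0 + (4 - 1) * 2 =
  // 6, so the per-part base pointer computed in the loop below is moved 6
  // elements below the address of the current instruction for that part.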
2205   if (Group->isReverse())
2206     Index += (VF - 1) * Group->getFactor();
2207 
2208   for (unsigned Part = 0; Part < UF; Part++) {
2209     Value *AddrPart = State.get(Addr, {Part, 0});
2210     setDebugLocFromInst(Builder, AddrPart);
2211 
    // Note that the current instruction could be at any index in the group.
    // We need to adjust the address so it points to the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
2223 
2224     bool InBounds = false;
2225     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2226       InBounds = gep->isInBounds();
2227     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2228     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2229 
2230     // Cast to the vector pointer type.
2231     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2232     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2233     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2234   }
2235 
2236   setDebugLocFromInst(Builder, Instr);
2237   Value *UndefVec = UndefValue::get(VecTy);
2238 
2239   Value *MaskForGaps = nullptr;
2240   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2241     MaskForGaps = createBitMaskForGaps(Builder, VF, *Group);
2242     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2243   }
2244 
2245   // Vectorize the interleaved load group.
2246   if (isa<LoadInst>(Instr)) {
2247     // For each unroll part, create a wide load for the group.
2248     SmallVector<Value *, 2> NewLoads;
2249     for (unsigned Part = 0; Part < UF; Part++) {
2250       Instruction *NewLoad;
2251       if (BlockInMask || MaskForGaps) {
2252         assert(useMaskedInterleavedAccesses(*TTI) &&
2253                "masked interleaved groups are not allowed.");
2254         Value *GroupMask = MaskForGaps;
2255         if (BlockInMask) {
2256           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2257           auto *Undefs = UndefValue::get(BlockInMaskPart->getType());
2258           auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF);
2259           Value *ShuffledMask = Builder.CreateShuffleVector(
2260               BlockInMaskPart, Undefs, RepMask, "interleaved.mask");
2261           GroupMask = MaskForGaps
2262                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2263                                                 MaskForGaps)
2264                           : ShuffledMask;
2265         }
2266         NewLoad =
2267             Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
2268                                      GroupMask, UndefVec, "wide.masked.vec");
      } else
2271         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2272                                             Group->getAlign(), "wide.vec");
2273       Group->addMetadata(NewLoad);
2274       NewLoads.push_back(NewLoad);
2275     }
2276 
2277     // For each member in the group, shuffle out the appropriate data from the
2278     // wide loads.
2279     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2280       Instruction *Member = Group->getMember(I);
2281 
2282       // Skip the gaps in the group.
2283       if (!Member)
2284         continue;
2285 
2286       Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF);
2287       for (unsigned Part = 0; Part < UF; Part++) {
2288         Value *StridedVec = Builder.CreateShuffleVector(
2289             NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2290 
        // If this member has a different type, cast the result to that type.
2292         if (Member->getType() != ScalarTy) {
2293           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2294           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2295         }
2296 
2297         if (Group->isReverse())
2298           StridedVec = reverseVector(StridedVec);
2299 
2300         VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
2301       }
2302     }
2303     return;
2304   }
2305 
  // The sub-vector type for the current instruction.
2307   VectorType *SubVT = VectorType::get(ScalarTy, VF);
2308 
2309   // Vectorize the interleaved store group.
2310   for (unsigned Part = 0; Part < UF; Part++) {
2311     // Collect the stored vector from each member.
2312     SmallVector<Value *, 4> StoredVecs;
2313     for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow gaps, so each index has a
      // member.
2315       Instruction *Member = Group->getMember(i);
2316       assert(Member && "Fail to get a member from an interleaved store group");
2317 
2318       Value *StoredVec = getOrCreateVectorValue(
2319           cast<StoreInst>(Member)->getValueOperand(), Part);
2320       if (Group->isReverse())
2321         StoredVec = reverseVector(StoredVec);
2322 
      // If this member has a different type, cast it to the unified type.
2325       if (StoredVec->getType() != SubVT)
2326         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2327 
2328       StoredVecs.push_back(StoredVec);
2329     }
2330 
2331     // Concatenate all vectors into a wide vector.
2332     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2333 
2334     // Interleave the elements in the wide vector.
2335     Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor);
2336     Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
2337                                               "interleaved.vec");
2338 
2339     Instruction *NewStoreInstr;
2340     if (BlockInMask) {
2341       Value *BlockInMaskPart = State.get(BlockInMask, Part);
2342       auto *Undefs = UndefValue::get(BlockInMaskPart->getType());
2343       auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF);
2344       Value *ShuffledMask = Builder.CreateShuffleVector(
2345           BlockInMaskPart, Undefs, RepMask, "interleaved.mask");
2346       NewStoreInstr = Builder.CreateMaskedStore(
2347           IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
    } else
2350       NewStoreInstr =
2351           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2352 
2353     Group->addMetadata(NewStoreInstr);
2354   }
2355 }
2356 
2357 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
2358                                                      VPTransformState &State,
2359                                                      VPValue *Addr,
2360                                                      VPValue *StoredValue,
2361                                                      VPValue *BlockInMask) {
2362   // Attempt to issue a wide load.
2363   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2364   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2365 
2366   assert((LI || SI) && "Invalid Load/Store instruction");
2367   assert((!SI || StoredValue) && "No stored value provided for widened store");
2368   assert((!LI || !StoredValue) && "Stored value provided for widened load");
2369 
2370   LoopVectorizationCostModel::InstWidening Decision =
2371       Cost->getWideningDecision(Instr, VF);
2372   assert((Decision == LoopVectorizationCostModel::CM_Widen ||
2373           Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
2374           Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
2375          "CM decision is not to widen the memory instruction");
2376 
2377   Type *ScalarDataTy = getMemInstValueType(Instr);
2378   Type *DataTy = VectorType::get(ScalarDataTy, VF);
  // An alignment of 0 means target ABI alignment. We need to use the scalar's
  // target ABI alignment in such a case.
2381   const DataLayout &DL = Instr->getModule()->getDataLayout();
2382   const Align Alignment =
2383       DL.getValueOrABITypeAlignment(getLoadStoreAlignment(Instr), ScalarDataTy);
2384 
2385   // Determine if the pointer operand of the access is either consecutive or
2386   // reverse consecutive.
2387   bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
2388   bool ConsecutiveStride =
2389       Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
2390   bool CreateGatherScatter =
2391       (Decision == LoopVectorizationCostModel::CM_GatherScatter);
2392 
2393   // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
2394   // gather/scatter. Otherwise Decision should have been to Scalarize.
2395   assert((ConsecutiveStride || CreateGatherScatter) &&
2396          "The instruction should be scalarized");
2397   (void)ConsecutiveStride;
2398 
2399   VectorParts BlockInMaskParts(UF);
2400   bool isMaskRequired = BlockInMask;
2401   if (isMaskRequired)
2402     for (unsigned Part = 0; Part < UF; ++Part)
2403       BlockInMaskParts[Part] = State.get(BlockInMask, Part);
2404 
2405   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
2406     // Calculate the pointer for the specific unroll-part.
2407     GetElementPtrInst *PartPtr = nullptr;
2408 
2409     bool InBounds = false;
2410     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2411       InBounds = gep->isInBounds();
2412 
2413     if (Reverse) {
2414       // If the address is consecutive but reversed, then the
2415       // wide store needs to start at the last vector element.
2416       PartPtr = cast<GetElementPtrInst>(
2417           Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(-Part * VF)));
2418       PartPtr->setIsInBounds(InBounds);
2419       PartPtr = cast<GetElementPtrInst>(
2420           Builder.CreateGEP(ScalarDataTy, PartPtr, Builder.getInt32(1 - VF)));
2421       PartPtr->setIsInBounds(InBounds);
2422       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
2423         BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]);
2424     } else {
2425       PartPtr = cast<GetElementPtrInst>(
2426           Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(Part * VF)));
2427       PartPtr->setIsInBounds(InBounds);
2428     }
2429 
2430     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2431     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2432   };
2433 
2434   // Handle Stores:
2435   if (SI) {
2436     setDebugLocFromInst(Builder, SI);
2437 
2438     for (unsigned Part = 0; Part < UF; ++Part) {
2439       Instruction *NewSI = nullptr;
2440       Value *StoredVal = State.get(StoredValue, Part);
2441       if (CreateGatherScatter) {
2442         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2443         Value *VectorGep = State.get(Addr, Part);
2444         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
2445                                             MaskPart);
2446       } else {
2447         if (Reverse) {
2448           // If we store to reverse consecutive memory locations, then we need
2449           // to reverse the order of elements in the stored value.
2450           StoredVal = reverseVector(StoredVal);
2451           // We don't want to update the value in the map as it might be used in
2452           // another expression. So don't call resetVectorValue(StoredVal).
2453         }
2454         auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
2455         if (isMaskRequired)
2456           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
2457                                             BlockInMaskParts[Part]);
2458         else
2459           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
2460       }
2461       addMetadata(NewSI, SI);
2462     }
2463     return;
2464   }
2465 
2466   // Handle loads.
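  // For a consecutive masked access with VF = 4, this emits roughly (an
  // illustrative sketch, not verbatim output):
  //   %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(
  //       <4 x i32>* %vecptr, i32 4, <4 x i1> %mask, <4 x i32> undef)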
2467   assert(LI && "Must have a load instruction");
2468   setDebugLocFromInst(Builder, LI);
2469   for (unsigned Part = 0; Part < UF; ++Part) {
2470     Value *NewLI;
2471     if (CreateGatherScatter) {
2472       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
2473       Value *VectorGep = State.get(Addr, Part);
2474       NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
2475                                          nullptr, "wide.masked.gather");
2476       addMetadata(NewLI, LI);
2477     } else {
2478       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
2479       if (isMaskRequired)
2480         NewLI = Builder.CreateMaskedLoad(
2481             VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy),
2482             "wide.masked.load");
2483       else
2484         NewLI =
2485             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
2486 
2487       // Add metadata to the load, but setVectorValue to the reverse shuffle.
2488       addMetadata(NewLI, LI);
2489       if (Reverse)
2490         NewLI = reverseVector(NewLI);
2491     }
2492     VectorLoopValueMap.setVectorValue(Instr, Part, NewLI);
2493   }
2494 }
2495 
2496 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2497                                                const VPIteration &Instance,
2498                                                bool IfPredicateInstr) {
2499   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2500 
2501   setDebugLocFromInst(Builder, Instr);
2502 
  // Does this instruction return a value?
2504   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2505 
2506   Instruction *Cloned = Instr->clone();
2507   if (!IsVoidRetTy)
2508     Cloned->setName(Instr->getName() + ".cloned");
2509 
2510   // Replace the operands of the cloned instructions with their scalar
2511   // equivalents in the new loop.
2512   for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
2513     auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance);
2514     Cloned->setOperand(op, NewOp);
2515   }
2516   addNewMetadata(Cloned, Instr);
2517 
2518   // Place the cloned scalar in the new loop.
2519   Builder.Insert(Cloned);
2520 
2521   // Add the cloned scalar to the scalar map entry.
2522   VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
2523 
  // If we just cloned a new assumption, add it to the assumption cache.
2525   if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2526     if (II->getIntrinsicID() == Intrinsic::assume)
2527       AC->registerAssumption(II);
2528 
2529   // End if-block.
2530   if (IfPredicateInstr)
2531     PredicatedInstructions.push_back(Cloned);
2532 }
2533 
2534 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2535                                                       Value *End, Value *Step,
2536                                                       Instruction *DL) {
2537   BasicBlock *Header = L->getHeader();
2538   BasicBlock *Latch = L->getLoopLatch();
2539   // As we're just creating this loop, it's possible no latch exists
2540   // yet. If so, use the header as this will be a single block loop.
2541   if (!Latch)
2542     Latch = Header;
2543 
2544   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2545   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
2546   setDebugLocFromInst(Builder, OldInst);
2547   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2548 
2549   Builder.SetInsertPoint(Latch->getTerminator());
2550   setDebugLocFromInst(Builder, OldInst);
2551 
2552   // Create i+1 and fill the PHINode.
2553   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2554   Induction->addIncoming(Start, L->getLoopPreheader());
2555   Induction->addIncoming(Next, Latch);
2556   // Create the compare.
2557   Value *ICmp = Builder.CreateICmpEQ(Next, End);
2558   Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
2559 
2560   // Now we have two terminators. Remove the old one from the block.
2561   Latch->getTerminator()->eraseFromParent();
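  // The generated skeleton looks roughly like (a sketch, with
  // Step = VF * UF = 8):
  //   index = phi [ start, preheader ], [ index.next, latch ]
  //   ...
  //   index.next = add index, 8
  //   cmp = icmp eq index.next, end
  //   br cmp, exit, header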
2562 
2563   return Induction;
2564 }
2565 
2566 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2567   if (TripCount)
2568     return TripCount;
2569 
2570   assert(L && "Create Trip Count for null loop.");
2571   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2572   // Find the loop boundaries.
2573   ScalarEvolution *SE = PSE.getSE();
2574   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2575   assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
2576          "Invalid loop count");
2577 
2578   Type *IdxTy = Legal->getWidestInductionType();
2579   assert(IdxTy && "No type for induction");
2580 
  // The exit count might have type i64 while the phi is i32. This can happen
  // if we have an induction variable that is sign extended before the compare.
  // The only way we can get a backedge-taken count in that case is if the
  // induction variable is signed and hence will not overflow; in such a case
  // truncation is legal.
2586   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
2587       IdxTy->getPrimitiveSizeInBits())
2588     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2589   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2590 
2591   // Get the total trip count from the count by adding 1.
2592   const SCEV *ExitCount = SE->getAddExpr(
2593       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
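  // For example, for "for (i = 0; i < n; ++i)" the backedge-taken count is
  // n - 1, so the trip count computed here is n.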
2594 
2595   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2596 
2597   // Expand the trip count and place the new instructions in the preheader.
2598   // Notice that the pre-header does not change, only the loop body.
2599   SCEVExpander Exp(*SE, DL, "induction");
2600 
2601   // Count holds the overall loop count (N).
2602   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2603                                 L->getLoopPreheader()->getTerminator());
2604 
2605   if (TripCount->getType()->isPointerTy())
2606     TripCount =
2607         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2608                                     L->getLoopPreheader()->getTerminator());
2609 
2610   return TripCount;
2611 }
2612 
2613 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
2614   if (VectorTripCount)
2615     return VectorTripCount;
2616 
2617   Value *TC = getOrCreateTripCount(L);
2618   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2619 
2620   Type *Ty = TC->getType();
2621   Constant *Step = ConstantInt::get(Ty, VF * UF);
2622 
2623   // If the tail is to be folded by masking, round the number of iterations N
2624   // up to a multiple of Step instead of rounding down. This is done by first
2625   // adding Step-1 and then rounding down. Note that it's ok if this addition
2626   // overflows: the vector induction variable will eventually wrap to zero given
2627   // that it starts at zero and its Step is a power of two; the loop will then
2628   // exit, with the last early-exit vector comparison also producing all-true.
2629   if (Cost->foldTailByMasking()) {
2630     assert(isPowerOf2_32(VF * UF) &&
2631            "VF*UF must be a power of 2 when folding tail by masking");
2632     TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up");
2633   }
2634 
2635   // Now we need to generate the expression for the part of the loop that the
2636   // vectorized body will execute. This is equal to N - (N % Step) if scalar
2637   // iterations are not required for correctness, or N - Step, otherwise. Step
2638   // is equal to the vectorization factor (number of SIMD elements) times the
2639   // unroll factor (number of SIMD instructions).
2640   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2641 
2642   // If there is a non-reversed interleaved group that may speculatively access
2643   // memory out-of-bounds, we need to ensure that there will be at least one
2644   // iteration of the scalar epilogue loop. Thus, if the step evenly divides
2645   // the trip count, we set the remainder to be equal to the step. If the step
2646   // does not evenly divide the trip count, no adjustment is necessary since
2647   // there will already be scalar iterations. Note that the minimum iterations
2648   // check ensures that N >= Step.
2649   if (VF > 1 && Cost->requiresScalarEpilogue()) {
2650     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2651     R = Builder.CreateSelect(IsZero, Step, R);
2652   }
2653 
2654   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
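  // A worked example with Step = VF * UF = 4: for TC = 10, R = 2 and
  // n.vec = 8. With tail folding, TC is first rounded up to 13, giving R = 1
  // and n.vec = 12, which covers all 10 iterations under the mask. If a
  // scalar epilogue is required and TC = 12, R is bumped from 0 to 4 so
  // n.vec = 8 and the epilogue runs at least one iteration.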
2655 
2656   return VectorTripCount;
2657 }
2658 
2659 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
2660                                                    const DataLayout &DL) {
2661   // Verify that V is a vector type with same number of elements as DstVTy.
2662   unsigned VF = DstVTy->getNumElements();
2663   VectorType *SrcVecTy = cast<VectorType>(V->getType());
  assert((VF == SrcVecTy->getNumElements()) &&
         "Vector dimensions do not match");
2665   Type *SrcElemTy = SrcVecTy->getElementType();
2666   Type *DstElemTy = DstVTy->getElementType();
2667   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2668          "Vector elements must have same size");
2669 
2670   // Do a direct cast if element types are castable.
2671   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2672     return Builder.CreateBitOrPointerCast(V, DstVTy);
2673   }
2674   // V cannot be directly casted to desired vector type.
2675   // May happen when V is a floating point vector but DstVTy is a vector of
2676   // pointers or vice-versa. Handle this using a two-step bitcast using an
2677   // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
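  // For example (assuming 64-bit pointers), a <2 x double> value would be
  // cast to <2 x i8*> via the intermediate type <2 x i64>.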
2678   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2679          "Only one type should be a pointer type");
2680   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2681          "Only one type should be a floating point type");
2682   Type *IntTy =
2683       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2684   VectorType *VecIntTy = VectorType::get(IntTy, VF);
2685   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2686   return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
2687 }
2688 
2689 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2690                                                          BasicBlock *Bypass) {
2691   Value *Count = getOrCreateTripCount(L);
2692   // Reuse existing vector loop preheader for TC checks.
  // Note that a new preheader block is generated for the vector loop.
2694   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
2695   IRBuilder<> Builder(TCCheckBlock->getTerminator());
2696 
2697   // Generate code to check if the loop's trip count is less than VF * UF, or
2698   // equal to it in case a scalar epilogue is required; this implies that the
2699   // vector trip count is zero. This check also covers the case where adding one
2700   // to the backedge-taken count overflowed leading to an incorrect trip count
2701   // of zero. In this case we will also jump to the scalar loop.
2702   auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
2703                                           : ICmpInst::ICMP_ULT;
2704 
2705   // If tail is to be folded, vector loop takes care of all iterations.
2706   Value *CheckMinIters = Builder.getFalse();
2707   if (!Cost->foldTailByMasking())
2708     CheckMinIters = Builder.CreateICmp(
2709         P, Count, ConstantInt::get(Count->getType(), VF * UF),
2710         "min.iters.check");
2711 
2712   // Create new preheader for vector loop.
2713   LoopVectorPreHeader =
2714       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
2715                  "vector.ph");
2716 
2717   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
2718                                DT->getNode(Bypass)->getIDom()) &&
2719          "TC check is expected to dominate Bypass");
2720 
2721   // Update dominator for Bypass & LoopExit.
2722   DT->changeImmediateDominator(Bypass, TCCheckBlock);
2723   DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
2724 
2725   ReplaceInstWithInst(
2726       TCCheckBlock->getTerminator(),
2727       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
2728   LoopBypassBlocks.push_back(TCCheckBlock);
2729 }
2730 
2731 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
2732   // Reuse existing vector loop preheader for SCEV checks.
  // Note that a new preheader block is generated for the vector loop.
2734   BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader;
2735 
  // Generate the code to check the SCEV assumptions that we made.
2737   // We want the new basic block to start at the first instruction in a
2738   // sequence of instructions that form a check.
2739   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
2740                    "scev.check");
2741   Value *SCEVCheck = Exp.expandCodeForPredicate(
2742       &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator());
2743 
2744   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
2745     if (C->isZero())
2746       return;
2747 
2748   assert(!SCEVCheckBlock->getParent()->hasOptSize() &&
2749          "Cannot SCEV check stride or overflow when optimizing for size");
2750 
2751   SCEVCheckBlock->setName("vector.scevcheck");
2752   // Create new preheader for vector loop.
2753   LoopVectorPreHeader =
2754       SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI,
2755                  nullptr, "vector.ph");
2756 
  // Update dominator only if this is the first RT check.
2758   if (LoopBypassBlocks.empty()) {
2759     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
2760     DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
2761   }
2762 
2763   ReplaceInstWithInst(
2764       SCEVCheckBlock->getTerminator(),
2765       BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck));
2766   LoopBypassBlocks.push_back(SCEVCheckBlock);
2767   AddedSafetyChecks = true;
2768 }
2769 
2770 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
2771   // VPlan-native path does not do any analysis for runtime checks currently.
2772   if (EnableVPlanNativePath)
2773     return;
2774 
2775   // Reuse existing vector loop preheader for runtime memory checks.
  // Note that a new preheader block is generated for the vector loop.
2777   BasicBlock *const MemCheckBlock = L->getLoopPreheader();
2778 
2779   // Generate the code that checks in runtime if arrays overlap. We put the
2780   // checks into a separate block to make the more common case of few elements
2781   // faster.
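  // Conceptually, for each pair of potentially-aliasing pointer ranges the
  // emitted predicate has the form (a sketch):
  //   %conflict = (StartA < EndB) && (StartB < EndA)
  // and the loop branches to the scalar version if any pair conflicts.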
2782   Instruction *FirstCheckInst;
2783   Instruction *MemRuntimeCheck;
2784   std::tie(FirstCheckInst, MemRuntimeCheck) =
2785       Legal->getLAI()->addRuntimeChecks(MemCheckBlock->getTerminator());
2786   if (!MemRuntimeCheck)
2787     return;
2788 
2789   if (MemCheckBlock->getParent()->hasOptSize()) {
2790     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
2791            "Cannot emit memory checks when optimizing for size, unless forced "
2792            "to vectorize.");
2793     ORE->emit([&]() {
2794       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
2795                                         L->getStartLoc(), L->getHeader())
2796              << "Code-size may be reduced by not forcing "
2797                 "vectorization, or by source-code modifications "
2798                 "eliminating the need for runtime checks "
2799                 "(e.g., adding 'restrict').";
2800     });
2801   }
2802 
2803   MemCheckBlock->setName("vector.memcheck");
2804   // Create new preheader for vector loop.
2805   LoopVectorPreHeader =
2806       SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr,
2807                  "vector.ph");
2808 
  // Update dominator only if this is the first RT check.
2810   if (LoopBypassBlocks.empty()) {
2811     DT->changeImmediateDominator(Bypass, MemCheckBlock);
2812     DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock);
2813   }
2814 
2815   ReplaceInstWithInst(
2816       MemCheckBlock->getTerminator(),
2817       BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheck));
2818   LoopBypassBlocks.push_back(MemCheckBlock);
2819   AddedSafetyChecks = true;
2820 
2821   // We currently don't use LoopVersioning for the actual loop cloning but we
2822   // still use it to add the noalias metadata.
2823   LVer = std::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
2824                                           PSE.getSE());
2825   LVer->prepareNoAliasMetadata();
2826 }
2827 
2828 Value *InnerLoopVectorizer::emitTransformedIndex(
2829     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
2830     const InductionDescriptor &ID) const {
2831 
2832   SCEVExpander Exp(*SE, DL, "induction");
2833   auto Step = ID.getStep();
2834   auto StartValue = ID.getStartValue();
2835   assert(Index->getType() == Step->getType() &&
2836          "Index type does not match StepValue type");
2837 
  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and
  // rely on InstCombine for future simplifications. Here we handle some
  // trivial cases only.
2844   auto CreateAdd = [&B](Value *X, Value *Y) {
2845     assert(X->getType() == Y->getType() && "Types don't match!");
2846     if (auto *CX = dyn_cast<ConstantInt>(X))
2847       if (CX->isZero())
2848         return Y;
2849     if (auto *CY = dyn_cast<ConstantInt>(Y))
2850       if (CY->isZero())
2851         return X;
2852     return B.CreateAdd(X, Y);
2853   };
2854 
2855   auto CreateMul = [&B](Value *X, Value *Y) {
2856     assert(X->getType() == Y->getType() && "Types don't match!");
2857     if (auto *CX = dyn_cast<ConstantInt>(X))
2858       if (CX->isOne())
2859         return Y;
2860     if (auto *CY = dyn_cast<ConstantInt>(Y))
2861       if (CY->isOne())
2862         return X;
2863     return B.CreateMul(X, Y);
2864   };
2865 
2866   switch (ID.getKind()) {
2867   case InductionDescriptor::IK_IntInduction: {
2868     assert(Index->getType() == StartValue->getType() &&
2869            "Index type does not match StartValue type");
2870     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
2871       return B.CreateSub(StartValue, Index);
2872     auto *Offset = CreateMul(
2873         Index, Exp.expandCodeFor(Step, Index->getType(), &*B.GetInsertPoint()));
2874     return CreateAdd(StartValue, Offset);
2875   }
2876   case InductionDescriptor::IK_PtrInduction: {
2877     assert(isa<SCEVConstant>(Step) &&
2878            "Expected constant step for pointer induction");
2879     return B.CreateGEP(
2880         StartValue->getType()->getPointerElementType(), StartValue,
2881         CreateMul(Index, Exp.expandCodeFor(Step, Index->getType(),
2882                                            &*B.GetInsertPoint())));
2883   }
2884   case InductionDescriptor::IK_FpInduction: {
2885     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
2886     auto InductionBinOp = ID.getInductionBinOp();
2887     assert(InductionBinOp &&
2888            (InductionBinOp->getOpcode() == Instruction::FAdd ||
2889             InductionBinOp->getOpcode() == Instruction::FSub) &&
2890            "Original bin op should be defined for FP induction");
2891 
2892     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
2893 
2894     // Floating point operations had to be 'fast' to enable the induction.
2895     FastMathFlags Flags;
2896     Flags.setFast();
2897 
2898     Value *MulExp = B.CreateFMul(StepValue, Index);
2899     if (isa<Instruction>(MulExp))
      // We have to check because MulExp may have been folded to a constant.
2901       cast<Instruction>(MulExp)->setFastMathFlags(Flags);
2902 
2903     Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
2904                                "induction");
2905     if (isa<Instruction>(BOp))
2906       cast<Instruction>(BOp)->setFastMathFlags(Flags);
2907 
2908     return BOp;
2909   }
2910   case InductionDescriptor::IK_NoInduction:
2911     return nullptr;
2912   }
2913   llvm_unreachable("invalid enum");
2914 }
2915 
2916 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
2917   /*
2918    In this function we generate a new loop. The new loop will contain
2919    the vectorized instructions while the old loop will continue to run the
2920    scalar remainder.
2921 
2922        [ ] <-- loop iteration number check.
2923     /   |
2924    /    v
2925   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
2926   |  /  |
2927   | /   v
2928   ||   [ ]     <-- vector pre header.
2929   |/    |
2930   |     v
2931   |    [  ] \
2932   |    [  ]_|   <-- vector loop.
2933   |     |
2934   |     v
2935   |   -[ ]   <--- middle-block.
2936   |  /  |
2937   | /   v
2938   -|- >[ ]     <--- new preheader.
2939    |    |
2940    |    v
2941    |   [ ] \
2942    |   [ ]_|   <-- old scalar loop to handle remainder.
2943     \   |
2944      \  v
2945       >[ ]     <-- exit block.
2946    ...
2947    */
2948 
2949   MDNode *OrigLoopID = OrigLoop->getLoopID();
2950 
2951   // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators, which often have multiple pointer
2953   // induction variables. In the code below we also support a case where we
2954   // don't have a single induction variable.
2955   //
  // We try as hard as possible to obtain an induction variable from the
  // original loop. However, if we don't find one that:
2958   //   - is an integer
2959   //   - counts from zero, stepping by one
2960   //   - is the size of the widest induction variable type
2961   // then we create a new one.
2962   OldInduction = Legal->getPrimaryInduction();
2963   Type *IdxTy = Legal->getWidestInductionType();
2964 
2965   // Split the single block loop into the two loop structure described above.
2966   LoopScalarBody = OrigLoop->getHeader();
2967   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
2968   LoopExitBlock = OrigLoop->getExitBlock();
2969   assert(LoopExitBlock && "Must have an exit block");
2970   assert(LoopVectorPreHeader && "Invalid loop structure");
2971 
2972   LoopMiddleBlock =
2973       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
2974                  LI, nullptr, "middle.block");
2975   LoopScalarPreHeader =
2976       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
2977                  nullptr, "scalar.ph");
  // We intentionally don't let SplitBlock update LoopInfo, since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
2981   LoopVectorBody =
2982       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
2983                  nullptr, nullptr, "vector.body");
2984 
2985   // Update dominator for loop exit.
2986   DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
2987 
2988   // Create and register the new vector loop.
2989   Loop *Lp = LI->AllocateLoop();
2990   Loop *ParentLoop = OrigLoop->getParentLoop();
2991 
2992   // Insert the new loop into the loop nest and register the new basic blocks
2993   // before calling any utilities such as SCEV that require valid LoopInfo.
2994   if (ParentLoop) {
2995     ParentLoop->addChildLoop(Lp);
2996   } else {
2997     LI->addTopLevelLoop(Lp);
2998   }
2999   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3000 
3001   // Find the loop boundaries.
3002   Value *Count = getOrCreateTripCount(Lp);
3003 
3004   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3005 
3006   // Now, compare the new count to zero. If it is zero skip the vector loop and
3007   // jump to the scalar loop. This check also covers the case where the
3008   // backedge-taken count is uint##_max: adding one to it will overflow leading
3009   // to an incorrect trip count of zero. In this (rare) case we will also jump
3010   // to the scalar loop.
3011   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3012 
3013   // Generate the code to check any assumptions that we've made for SCEV
3014   // expressions.
3015   emitSCEVChecks(Lp, LoopScalarPreHeader);
3016 
3017   // Generate the code that checks in runtime if arrays overlap. We put the
3018   // checks into a separate block to make the more common case of few elements
3019   // faster.
3020   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3021 
3022   // Generate the induction variable.
3023   // The loop step is equal to the vectorization factor (num of SIMD elements)
3024   // times the unroll factor (num of SIMD instructions).
3025   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3026   Constant *Step = ConstantInt::get(IdxTy, VF * UF);
3027   Induction =
3028       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3029                               getDebugLocFromInstOrOperands(OldInduction));
3030 
3031   // We are going to resume the execution of the scalar loop.
3032   // Go over all of the induction variables that we found and fix the
3033   // PHIs that are left in the scalar version of the loop.
3034   // The starting values of PHI nodes depend on the counter of the last
3035   // iteration in the vectorized loop.
3036   // If we come from a bypass edge then we need to start from the original
3037   // start value.
3038 
  // The resume value created below saves the new starting index for the
  // scalar loop. It is used to test if there are any tail iterations left
  // once the vector loop has completed.
3042   for (auto &InductionEntry : Legal->getInductionVars()) {
3043     PHINode *OrigPhi = InductionEntry.first;
3044     InductionDescriptor II = InductionEntry.second;
3045 
    // Create phi nodes to merge from the backedge-taken check block.
3047     PHINode *BCResumeVal =
3048         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3049                         LoopScalarPreHeader->getTerminator());
3050     // Copy original phi DL over to the new one.
3051     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3052     Value *&EndValue = IVEndValues[OrigPhi];
3053     if (OrigPhi == OldInduction) {
3054       // We know what the end value is.
3055       EndValue = CountRoundDown;
3056     } else {
3057       IRBuilder<> B(Lp->getLoopPreheader()->getTerminator());
3058       Type *StepType = II.getStep()->getType();
3059       Instruction::CastOps CastOp =
3060           CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
3061       Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
3062       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3063       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3064       EndValue->setName("ind.end");
3065     }
3066 
3067     // The new PHI merges the original incoming value, in case of a bypass,
3068     // or the value at the end of the vectorized loop.
3069     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3070 
3071     // Fix the scalar body counter (PHI node).
3072     // The old induction's phi node in the scalar body needs the truncated
3073     // value.
3074     for (BasicBlock *BB : LoopBypassBlocks)
3075       BCResumeVal->addIncoming(II.getStartValue(), BB);
3076     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3077   }
3078 
3079   // We need the OrigLoop (scalar loop part) latch terminator to help
3080   // produce correct debug info for the middle block BB instructions.
3081   // The legality check stage guarantees that the loop will have a single
3082   // latch.
3083   assert(isa<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()) &&
3084          "Scalar loop latch terminator isn't a branch");
3085   BranchInst *ScalarLatchBr =
3086       cast<BranchInst>(OrigLoop->getLoopLatch()->getTerminator());
3087 
3088   // Add a check in the middle block to see if we have completed
3089   // all of the iterations in the first vector loop.
3090   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3091   // If tail is to be folded, we know we don't need to run the remainder.
3092   Value *CmpN = Builder.getTrue();
3093   if (!Cost->foldTailByMasking()) {
3094     CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
3095                            CountRoundDown, "cmp.n",
3096                            LoopMiddleBlock->getTerminator());
3097 
3098     // Here we use the same DebugLoc as the scalar loop latch branch instead
3099     // of the corresponding compare because they may have ended up with
3100     // different line numbers and we want to avoid awkward line stepping while
    // debugging, e.g. if the compare got a line number inside the loop.
3102     cast<Instruction>(CmpN)->setDebugLoc(ScalarLatchBr->getDebugLoc());
3103   }
3104 
3105   BranchInst *BrInst =
3106       BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, CmpN);
3107   BrInst->setDebugLoc(ScalarLatchBr->getDebugLoc());
3108   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3109 
3110   // Get ready to start creating new instructions into the vectorized body.
3111   assert(LoopVectorPreHeader == Lp->getLoopPreheader() &&
3112          "Inconsistent vector loop preheader");
3113   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3114 
3115   Optional<MDNode *> VectorizedLoopID =
3116       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3117                                       LLVMLoopVectorizeFollowupVectorized});
3118   if (VectorizedLoopID.hasValue()) {
3119     Lp->setLoopID(VectorizedLoopID.getValue());
3120 
3121     // Do not setAlreadyVectorized if loop attributes have been defined
3122     // explicitly.
3123     return LoopVectorPreHeader;
3124   }
3125 
3126   // Keep all loop hints from the original loop on the vector loop (we'll
3127   // replace the vectorizer-specific hints below).
3128   if (MDNode *LID = OrigLoop->getLoopID())
3129     Lp->setLoopID(LID);
3130 
3131   LoopVectorizeHints Hints(Lp, true, *ORE);
3132   Hints.setAlreadyVectorized();
3133 
3134 #ifdef EXPENSIVE_CHECKS
3135   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3136   LI->verify(*DT);
3137 #endif
3138 
3139   return LoopVectorPreHeader;
3140 }
3141 
3142 // Fix up external users of the induction variable. At this point, we are
3143 // in LCSSA form, with all external PHIs that use the IV having one input value,
3144 // coming from the remainder loop. We need those PHIs to also have a correct
3145 // value for the IV when arriving directly from the middle block.
3146 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3147                                        const InductionDescriptor &II,
3148                                        Value *CountRoundDown, Value *EndValue,
3149                                        BasicBlock *MiddleBlock) {
3150   // There are two kinds of external IV usages - those that use the value
3151   // computed in the last iteration (the PHI) and those that use the penultimate
3152   // value (the value that feeds into the phi from the loop latch).
  // We allow both, but they obviously have different values.
3154 
3155   assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3156 
3157   DenseMap<Value *, Value *> MissingVals;
3158 
3159   // An external user of the last iteration's value should see the value that
3160   // the remainder loop uses to initialize its own IV.
3161   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3162   for (User *U : PostInc->users()) {
3163     Instruction *UI = cast<Instruction>(U);
3164     if (!OrigLoop->contains(UI)) {
3165       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3166       MissingVals[UI] = EndValue;
3167     }
3168   }
3169 
  // An external user of the penultimate value needs to see EndValue - Step.
3171   // The simplest way to get this is to recompute it from the constituent SCEVs,
3172   // that is Start + (Step * (CRD - 1)).
3173   for (User *U : OrigPhi->users()) {
3174     auto *UI = cast<Instruction>(U);
3175     if (!OrigLoop->contains(UI)) {
3176       const DataLayout &DL =
3177           OrigLoop->getHeader()->getModule()->getDataLayout();
3178       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3179 
3180       IRBuilder<> B(MiddleBlock->getTerminator());
3181       Value *CountMinusOne = B.CreateSub(
3182           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3183       Value *CMO =
3184           !II.getStep()->getType()->isIntegerTy()
3185               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3186                              II.getStep()->getType())
3187               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3188       CMO->setName("cast.cmo");
3189       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3190       Escape->setName("ind.escape");
3191       MissingVals[UI] = Escape;
3192     }
3193   }
3194 
3195   for (auto &I : MissingVals) {
3196     PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
3198     // that is %IV2 = phi [...], [ %IV1, %latch ]
3199     // In this case, if IV1 has an external use, we need to avoid adding both
3200     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3201     // don't already have an incoming value for the middle block.
3202     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3203       PHI->addIncoming(I.second, MiddleBlock);
3204   }
3205 }
3206 
3207 namespace {
3208 
3209 struct CSEDenseMapInfo {
3210   static bool canHandle(const Instruction *I) {
3211     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3212            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3213   }
3214 
3215   static inline Instruction *getEmptyKey() {
3216     return DenseMapInfo<Instruction *>::getEmptyKey();
3217   }
3218 
3219   static inline Instruction *getTombstoneKey() {
3220     return DenseMapInfo<Instruction *>::getTombstoneKey();
3221   }
3222 
3223   static unsigned getHashValue(const Instruction *I) {
3224     assert(canHandle(I) && "Unknown instruction!");
3225     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3226                                                            I->value_op_end()));
3227   }
3228 
3229   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3230     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3231         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3232       return LHS == RHS;
3233     return LHS->isIdenticalTo(RHS);
3234   }
3235 };
3236 
3237 } // end anonymous namespace
3238 
/// Perform CSE of induction variable instructions.
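/// For example (a sketch), two identical extracts
///   %e1 = extractelement <4 x i64> %vec.ind, i32 0
///   %e2 = extractelement <4 x i64> %vec.ind, i32 0
/// are merged: uses of %e2 are rewired to %e1 and %e2 is erased.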
3240 static void cse(BasicBlock *BB) {
  // Perform simple CSE.
3242   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3243   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3244     Instruction *In = &*I++;
3245 
3246     if (!CSEDenseMapInfo::canHandle(In))
3247       continue;
3248 
3249     // Check if we can replace this instruction with any of the
3250     // visited instructions.
3251     if (Instruction *V = CSEMap.lookup(In)) {
3252       In->replaceAllUsesWith(V);
3253       In->eraseFromParent();
3254       continue;
3255     }
3256 
3257     CSEMap[In] = In;
3258   }
3259 }
3260 
3261 unsigned LoopVectorizationCostModel::getVectorCallCost(CallInst *CI,
3262                                                        unsigned VF,
3263                                                        bool &NeedToScalarize) {
3264   Function *F = CI->getCalledFunction();
3265   Type *ScalarRetTy = CI->getType();
3266   SmallVector<Type *, 4> Tys, ScalarTys;
3267   for (auto &ArgOp : CI->arg_operands())
3268     ScalarTys.push_back(ArgOp->getType());
3269 
3270   // Estimate cost of scalarized vector call. The source operands are assumed
3271   // to be vectors, so we need to extract individual elements from there,
3272   // execute VF scalar calls, and then gather the result into the vector return
3273   // value.
3274   unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
3275   if (VF == 1)
3276     return ScalarCallCost;
3277 
3278   // Compute corresponding vector type for return value and arguments.
3279   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3280   for (Type *ScalarTy : ScalarTys)
3281     Tys.push_back(ToVectorTy(ScalarTy, VF));
3282 
3283   // Compute costs of unpacking argument values for the scalar calls and
3284   // packing the return values to a vector.
3285   unsigned ScalarizationCost = getScalarizationOverhead(CI, VF);
3286 
3287   unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
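  // For instance (illustrative numbers only): with VF = 4, a scalar call
  // cost of 10 and a scalarization overhead of 6, the scalarized estimate is
  // 4 * 10 + 6 = 46; the vector variant below is preferred only if cheaper.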
3288 
3289   // If we can't emit a vector call for this function, then the currently found
3290   // cost is the cost we need to return.
3291   NeedToScalarize = true;
3292   VFShape Shape = VFShape::get(*CI, {VF, false}, false /*HasGlobalPred*/);
3293   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3294 
3295   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3296     return Cost;
3297 
3298   // If the corresponding vector cost is cheaper, return its cost.
3299   unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
3300   if (VectorCallCost < Cost) {
3301     NeedToScalarize = false;
3302     return VectorCallCost;
3303   }
3304   return Cost;
3305 }
3306 
3307 unsigned LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3308                                                             unsigned VF) {
3309   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3310   assert(ID && "Expected intrinsic call!");
3311 
3312   FastMathFlags FMF;
3313   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3314     FMF = FPMO->getFastMathFlags();
3315 
3316   SmallVector<Value *, 4> Operands(CI->arg_operands());
3317   return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF, CI);
3318 }
3319 
3320 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3321   auto *I1 = cast<IntegerType>(T1->getVectorElementType());
3322   auto *I2 = cast<IntegerType>(T2->getVectorElementType());
3323   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3324 }
3325 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3326   auto *I1 = cast<IntegerType>(T1->getVectorElementType());
3327   auto *I2 = cast<IntegerType>(T2->getVectorElementType());
3328   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3329 }
3330 
3331 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
3332   // For every instruction `I` in MinBWs, truncate the operands, create a
3333   // truncated version of `I` and reextend its result. InstCombine runs
3334   // later and will remove any ext/trunc pairs.
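  // For example (a sketch): if the result of %a = add <4 x i32> %x, %y is
  // known to need only 8 bits, it is rewritten as
  //   %x.tr = trunc <4 x i32> %x to <4 x i8>
  //   %y.tr = trunc <4 x i32> %y to <4 x i8>
  //   %a.tr = add <4 x i8> %x.tr, %y.tr
  //   %a    = zext <4 x i8> %a.tr to <4 x i32>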
3335   SmallPtrSet<Value *, 4> Erased;
3336   for (const auto &KV : Cost->getMinimalBitwidths()) {
3337     // If the value wasn't vectorized, we must maintain the original scalar
3338     // type. The absence of the value from VectorLoopValueMap indicates that it
3339     // wasn't vectorized.
3340     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3341       continue;
3342     for (unsigned Part = 0; Part < UF; ++Part) {
3343       Value *I = getOrCreateVectorValue(KV.first, Part);
3344       if (Erased.find(I) != Erased.end() || I->use_empty() ||
3345           !isa<Instruction>(I))
3346         continue;
3347       Type *OriginalTy = I->getType();
3348       Type *ScalarTruncatedTy =
3349           IntegerType::get(OriginalTy->getContext(), KV.second);
3350       Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
3351                                           OriginalTy->getVectorNumElements());
3352       if (TruncatedTy == OriginalTy)
3353         continue;
3354 
3355       IRBuilder<> B(cast<Instruction>(I));
3356       auto ShrinkOperand = [&](Value *V) -> Value * {
3357         if (auto *ZI = dyn_cast<ZExtInst>(V))
3358           if (ZI->getSrcTy() == TruncatedTy)
3359             return ZI->getOperand(0);
3360         return B.CreateZExtOrTrunc(V, TruncatedTy);
3361       };
3362 
3363       // The actual instruction modification depends on the instruction type,
3364       // unfortunately.
3365       Value *NewI = nullptr;
3366       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3367         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3368                              ShrinkOperand(BO->getOperand(1)));
3369 
3370         // Any wrapping introduced by shrinking this operation shouldn't be
3371         // considered undefined behavior. So, we can't unconditionally copy
3372         // arithmetic wrapping flags to NewI.
3373         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3374       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3375         NewI =
3376             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3377                          ShrinkOperand(CI->getOperand(1)));
3378       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3379         NewI = B.CreateSelect(SI->getCondition(),
3380                               ShrinkOperand(SI->getTrueValue()),
3381                               ShrinkOperand(SI->getFalseValue()));
3382       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3383         switch (CI->getOpcode()) {
3384         default:
3385           llvm_unreachable("Unhandled cast!");
3386         case Instruction::Trunc:
3387           NewI = ShrinkOperand(CI->getOperand(0));
3388           break;
3389         case Instruction::SExt:
3390           NewI = B.CreateSExtOrTrunc(
3391               CI->getOperand(0),
3392               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3393           break;
3394         case Instruction::ZExt:
3395           NewI = B.CreateZExtOrTrunc(
3396               CI->getOperand(0),
3397               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3398           break;
3399         }
3400       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
3401         auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
3402         auto *O0 = B.CreateZExtOrTrunc(
3403             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3404         auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
3405         auto *O1 = B.CreateZExtOrTrunc(
3406             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3407 
3408         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
3409       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
3410         // Don't do anything with the operands, just extend the result.
3411         continue;
3412       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3413         auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
3414         auto *O0 = B.CreateZExtOrTrunc(
3415             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3416         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3417         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3418       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3419         auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
3420         auto *O0 = B.CreateZExtOrTrunc(
3421             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3422         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3423       } else {
3424         // If we don't know what to do, be conservative and don't do anything.
3425         continue;
3426       }
3427 
3428       // Lastly, extend the result.
3429       NewI->takeName(cast<Instruction>(I));
3430       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3431       I->replaceAllUsesWith(Res);
3432       cast<Instruction>(I)->eraseFromParent();
3433       Erased.insert(I);
3434       VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
3435     }
3436   }
3437 
  // We'll have created a bunch of ZExts that are now dead. Clean them up.
3439   for (const auto &KV : Cost->getMinimalBitwidths()) {
3440     // If the value wasn't vectorized, we must maintain the original scalar
3441     // type. The absence of the value from VectorLoopValueMap indicates that it
3442     // wasn't vectorized.
3443     if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3444       continue;
3445     for (unsigned Part = 0; Part < UF; ++Part) {
3446       Value *I = getOrCreateVectorValue(KV.first, Part);
3447       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3448       if (Inst && Inst->use_empty()) {
3449         Value *NewI = Inst->getOperand(0);
3450         Inst->eraseFromParent();
3451         VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3452       }
3453     }
3454   }
3455 }
3456 
3457 void InnerLoopVectorizer::fixVectorizedLoop() {
3458   // Insert truncates and extends for any truncated instructions as hints to
3459   // InstCombine.
3460   if (VF > 1)
3461     truncateToMinimalBitwidths();
3462 
3463   // Fix widened non-induction PHIs by setting up the PHI operands.
3464   if (OrigPHIsToFix.size()) {
3465     assert(EnableVPlanNativePath &&
3466            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3467     fixNonInductionPHIs();
3468   }
3469 
3470   // At this point every instruction in the original loop is widened to a
3471   // vector form. Now we need to fix the recurrences in the loop. These PHI
3472   // nodes are currently empty because we did not want to introduce cycles.
3473   // This is the second stage of vectorizing recurrences.
3474   fixCrossIterationPHIs();
3475 
3476   // Forget the original basic block.
3477   PSE.getSE()->forgetLoop(OrigLoop);
3478 
3479   // Fix-up external users of the induction variables.
3480   for (auto &Entry : Legal->getInductionVars())
3481     fixupIVUsers(Entry.first, Entry.second,
3482                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3483                  IVEndValues[Entry.first], LoopMiddleBlock);
3484 
3485   fixLCSSAPHIs();
3486   for (Instruction *PI : PredicatedInstructions)
3487     sinkScalarOperands(&*PI);
3488 
3489   // Remove redundant induction instructions.
3490   cse(LoopVectorBody);
3491 
3492   // Set/update profile weights for the vector and remainder loops as original
3493   // loop iterations are now distributed among them. Note that original loop
3494   // represented by LoopScalarBody becomes remainder loop after vectorization.
3495   //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly less accurate result, but that should be OK since
  // the profile is not inherently precise anyway. Note also that any possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
3501   setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody),
3502                                LI->getLoopFor(LoopVectorBody),
3503                                LI->getLoopFor(LoopScalarBody), VF * UF);
3504 }
3505 
3506 void InnerLoopVectorizer::fixCrossIterationPHIs() {
3507   // In order to support recurrences we need to be able to vectorize Phi nodes.
3508   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3509   // stage #2: We now need to fix the recurrences by adding incoming edges to
3510   // the currently empty PHI nodes. At this point every instruction in the
3511   // original loop is widened to a vector form so we can use them to construct
3512   // the incoming edges.
3513   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
3514     // Handle first-order recurrences and reductions that need to be fixed.
3515     if (Legal->isFirstOrderRecurrence(&Phi))
3516       fixFirstOrderRecurrence(&Phi);
3517     else if (Legal->isReductionVariable(&Phi))
3518       fixReduction(&Phi);
3519   }
3520 }
3521 
3522 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3523   // This is the second phase of vectorizing first-order recurrences. An
3524   // overview of the transformation is described below. Suppose we have the
3525   // following loop.
3526   //
3527   //   for (int i = 0; i < n; ++i)
3528   //     b[i] = a[i] - a[i - 1];
3529   //
3530   // There is a first-order recurrence on "a". For this loop, the shorthand
3531   // scalar IR looks like:
3532   //
3533   //   scalar.ph:
3534   //     s_init = a[-1]
3535   //     br scalar.body
3536   //
3537   //   scalar.body:
3538   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3539   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3540   //     s2 = a[i]
3541   //     b[i] = s2 - s1
3542   //     br cond, scalar.body, ...
3543   //
  // In this example, s1 is a recurrence because its value depends on the
3545   // previous iteration. In the first phase of vectorization, we created a
3546   // temporary value for s1. We now complete the vectorization and produce the
3547   // shorthand vector IR shown below (for VF = 4, UF = 1).
3548   //
3549   //   vector.ph:
3550   //     v_init = vector(..., ..., ..., a[-1])
3551   //     br vector.body
3552   //
3553   //   vector.body
3554   //     i = phi [0, vector.ph], [i+4, vector.body]
3555   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3556   //     v2 = a[i, i+1, i+2, i+3];
3557   //     v3 = vector(v1(3), v2(0, 1, 2))
3558   //     b[i, i+1, i+2, i+3] = v2 - v3
3559   //     br cond, vector.body, middle.block
3560   //
3561   //   middle.block:
3562   //     x = v2(3)
3563   //     br scalar.ph
3564   //
3565   //   scalar.ph:
3566   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3567   //     br scalar.body
3568   //
  // After the vector loop completes execution, we extract the next value of
3570   // the recurrence (x) to use as the initial value in the scalar loop.
3571 
3572   // Get the original loop preheader and single loop latch.
3573   auto *Preheader = OrigLoop->getLoopPreheader();
3574   auto *Latch = OrigLoop->getLoopLatch();
3575 
3576   // Get the initial and previous values of the scalar recurrence.
3577   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3578   auto *Previous = Phi->getIncomingValueForBlock(Latch);
3579 
3580   // Create a vector from the initial value.
3581   auto *VectorInit = ScalarInit;
3582   if (VF > 1) {
3583     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3584     VectorInit = Builder.CreateInsertElement(
3585         UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
3586         Builder.getInt32(VF - 1), "vector.recur.init");
3587   }
3588 
3589   // We constructed a temporary phi node in the first phase of vectorization.
3590   // This phi node will eventually be deleted.
3591   Builder.SetInsertPoint(
3592       cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
3593 
3594   // Create a phi node for the new recurrence. The current value will either be
3595   // the initial value inserted into a vector or loop-varying vector value.
3596   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
3597   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
3598 
3599   // Get the vectorized previous value of the last part UF - 1. It appears last
3600   // among all unrolled iterations, due to the order of their construction.
3601   Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
3602 
3603   // Find and set the insertion point after the previous value if it is an
3604   // instruction.
3605   BasicBlock::iterator InsertPt;
3606   // Note that the previous value may have been constant-folded so it is not
3607   // guaranteed to be an instruction in the vector loop.
3608   // FIXME: Loop invariant values do not form recurrences. We should deal with
3609   //        them earlier.
3610   if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart))
3611     InsertPt = LoopVectorBody->getFirstInsertionPt();
3612   else {
3613     Instruction *PreviousInst = cast<Instruction>(PreviousLastPart);
3614     if (isa<PHINode>(PreviousLastPart))
3615       // If the previous value is a phi node, we should insert after all the phi
3616       // nodes in the block containing the PHI to avoid breaking basic block
3617       // verification. Note that the basic block may be different to
3618       // LoopVectorBody, in case we predicate the loop.
3619       InsertPt = PreviousInst->getParent()->getFirstInsertionPt();
3620     else
3621       InsertPt = ++PreviousInst->getIterator();
3622   }
3623   Builder.SetInsertPoint(&*InsertPt);
3624 
3625   // We will construct a vector for the recurrence by combining the values for
3626   // the current and previous iterations. This is the required shuffle mask.
3627   SmallVector<Constant *, 8> ShuffleMask(VF);
3628   ShuffleMask[0] = Builder.getInt32(VF - 1);
3629   for (unsigned I = 1; I < VF; ++I)
3630     ShuffleMask[I] = Builder.getInt32(I + VF - 1);
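       // For example (illustrative, assuming VF = 4), the mask is <3, 4, 5, 6>:
       // the last lane of the first shuffle operand followed by the first three
       // lanes of the second.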
3631 
3632   // The vector from which to take the initial value for the current iteration
3633   // (actual or unrolled). Initially, this is the vector phi node.
3634   Value *Incoming = VecPhi;
3635 
3636   // Shuffle the current and previous vector and update the vector parts.
3637   for (unsigned Part = 0; Part < UF; ++Part) {
3638     Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
3639     Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
3640     auto *Shuffle =
3641         VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart,
3642                                              ConstantVector::get(ShuffleMask))
3643                : Incoming;
3644     PhiPart->replaceAllUsesWith(Shuffle);
3645     cast<Instruction>(PhiPart)->eraseFromParent();
3646     VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
3647     Incoming = PreviousPart;
3648   }
3649 
3650   // Fix the latch value of the new recurrence in the vector loop.
3651   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3652 
3653   // Extract the last vector element in the middle block. This will be the
3654   // initial value for the recurrence when jumping to the scalar loop.
3655   auto *ExtractForScalar = Incoming;
3656   if (VF > 1) {
3657     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3658     ExtractForScalar = Builder.CreateExtractElement(
3659         ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
3660   }
3661   // Extract the second-to-last element in the middle block if the
3662   // Phi is used outside the loop. We need the value of the phi itself,
3663   // not the last element (the phi update of the current iteration). This
3664   // will be the value used when jumping from the LoopMiddleBlock to the exit
3665   // block, in case the scalar loop is not run at all.
3666   Value *ExtractForPhiUsedOutsideLoop = nullptr;
3667   if (VF > 1)
3668     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3669         Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
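       // For example (illustrative, assuming VF = 4), lane 3 of `Incoming` is
       // the next recurrence value fed to the scalar loop, while lane 2 holds
       // the value the phi had during the final vector iteration, which is what
       // users of the phi outside the loop need.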
3670   // When the loop is unrolled without vectorizing, initialize
3671   // ExtractForPhiUsedOutsideLoop with the unrolled value just prior to
3672   // `Incoming`. This is analogous to the vectorized case above: extracting the
3673   // second-to-last element when VF > 1.
3674   else if (UF > 1)
3675     ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
3676 
3677   // Fix the initial value of the original recurrence in the scalar loop.
3678   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3679   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3680   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3681     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3682     Start->addIncoming(Incoming, BB);
3683   }
3684 
3685   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3686   Phi->setName("scalar.recur");
3687 
3688   // Finally, fix users of the recurrence outside the loop. The users will need
3689   // either the last value of the scalar recurrence or the last value of the
3690   // vector recurrence we extracted in the middle block. Since the loop is in
3691   // LCSSA form, we just need to find all the phi nodes for the original scalar
3692   // recurrence in the exit block, and then add an edge for the middle block.
3693   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3694     if (LCSSAPhi.getIncomingValue(0) == Phi) {
3695       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3696     }
3697   }
3698 }
3699 
3700 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
3701   Constant *Zero = Builder.getInt32(0);
3702 
3703   // Get its reduction variable descriptor.
3704   assert(Legal->isReductionVariable(Phi) &&
3705          "Unable to find the reduction variable");
3706   RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
3707 
3708   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3709   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3710   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3711   RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3712     RdxDesc.getMinMaxRecurrenceKind();
3713   setDebugLocFromInst(Builder, ReductionStartValue);
3714 
3715   // We need to generate a reduction vector from the incoming scalar.
3716   // To do so, we need to generate the 'identity' vector and override
3717   // one of the elements with the incoming scalar reduction. We need
3718   // to do it in the vector-loop preheader.
3719   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3720 
3721   // This is the vector-clone of the value that leaves the loop.
3722   Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
3723 
3724   // Find the reduction identity value: zero for add/or/xor; one for mul;
3725   // -1 (all ones) for and.
3726   Value *Identity;
3727   Value *VectorStart;
3728   if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3729       RK == RecurrenceDescriptor::RK_FloatMinMax) {
3730     // MinMax reductions have the start value as their identity.
3731     if (VF == 1) {
3732       VectorStart = Identity = ReductionStartValue;
3733     } else {
3734       VectorStart = Identity =
3735         Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3736     }
3737   } else {
3738     // Handle other reduction kinds:
3739     Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3740         RK, VecTy->getScalarType());
3741     if (VF == 1) {
3742       Identity = Iden;
3743       // In the scalar case, start the reduction directly from the incoming
3744       // scalar reduction value.
3745       VectorStart = ReductionStartValue;
3746     } else {
3747       Identity = ConstantVector::getSplat({VF, false}, Iden);
3748 
3749       // This vector is the Identity vector where the first element is the
3750       // incoming scalar reduction.
3751       VectorStart =
3752         Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3753     }
3754   }
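       // For example (illustrative), an integer add reduction with VF = 4 uses
       // the identity <0, 0, 0, 0> and VectorStart <StartVal, 0, 0, 0>, so the
       // loop-invariant start value is counted exactly once.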
3755 
3756   // Wrap flags are in general invalid after vectorization, clear them.
3757   clearReductionWrapFlags(RdxDesc);
3758 
3759   // Fix the vector-loop phi.
3760 
3761   // Reductions do not have to start at zero. They can start with
3762   // any loop invariant values.
3763   BasicBlock *Latch = OrigLoop->getLoopLatch();
3764   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3765 
3766   for (unsigned Part = 0; Part < UF; ++Part) {
3767     Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
3768     Value *Val = getOrCreateVectorValue(LoopVal, Part);
3769     // Make sure to add the reduction start value only to the
3770     // first unroll part.
3771     Value *StartVal = (Part == 0) ? VectorStart : Identity;
3772     cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
3773     cast<PHINode>(VecRdxPhi)
3774       ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3775   }
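       // For example (illustrative, UF = 2), part 0 starts from VectorStart and
       // part 1 starts from the identity, so the start value is not added twice
       // when the unrolled parts are combined in the middle block.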
3776 
3777   // Before each round, move the insertion point right between
3778   // the PHIs and the values we are going to write.
3779   // This allows us to write both PHINodes and the extractelement
3780   // instructions.
3781   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3782 
3783   setDebugLocFromInst(Builder, LoopExitInst);
3784 
3785   // If the tail is folded by masking, the vector value that leaves the loop
3786   // should be a select choosing between the vectorized LoopExitInst and the
3787   // vectorized Phi, instead of LoopExitInst alone.
3788   if (Cost->foldTailByMasking()) {
3789     for (unsigned Part = 0; Part < UF; ++Part) {
3790       Value *VecLoopExitInst =
3791           VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3792       Value *Sel = nullptr;
3793       for (User *U : VecLoopExitInst->users()) {
3794         if (isa<SelectInst>(U)) {
3795           assert(!Sel && "Reduction exit feeding two selects");
3796           Sel = U;
3797         } else
3798           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
3799       }
3800       assert(Sel && "Reduction exit feeds no select");
3801       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel);
3802     }
3803   }
3804 
3805   // If the vector reduction can be performed in a smaller type, we truncate
3806   // then extend the loop exit value to enable InstCombine to evaluate the
3807   // entire expression in the smaller type.
3808   if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
3809     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3810     Builder.SetInsertPoint(
3811         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
3812     VectorParts RdxParts(UF);
3813     for (unsigned Part = 0; Part < UF; ++Part) {
3814       RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3815       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3816       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3817                                         : Builder.CreateZExt(Trunc, VecTy);
3818       for (Value::user_iterator UI = RdxParts[Part]->user_begin();
3819            UI != RdxParts[Part]->user_end();)
3820         if (*UI != Trunc) {
3821           (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
3822           RdxParts[Part] = Extnd;
3823         } else {
3824           ++UI;
3825         }
3826     }
3827     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3828     for (unsigned Part = 0; Part < UF; ++Part) {
3829       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
3830       VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
3831     }
3832   }
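       // For example (illustrative), an i8 add reduction computed in i32 with
       // VF = 4 produces in the vector loop latch:
       //   %trunc = trunc <4 x i32> %rdx to <4 x i8>
       //   %extnd = sext <4 x i8> %trunc to <4 x i32>
       // and users of %rdx are redirected to %extnd, so InstCombine can later
       // evaluate the whole expression in i8.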
3833 
3834   // Reduce all of the unrolled parts into a single vector.
3835   Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
3836   unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
3837 
3838   // The middle block terminator has already been assigned a DebugLoc here (the
3839   // OrigLoop's single latch terminator). We want the whole middle block to
3840   // appear to execute on this line because: (a) it is all compiler generated,
3841   // (b) these instructions are always executed after evaluating the latch
3842   // conditional branch, and (c) other passes may add new predecessors which
3843   // terminate on this line. This is the easiest way to ensure we don't
3844   // accidentally cause an extra step back into the loop while debugging.
3845   setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator());
3846   for (unsigned Part = 1; Part < UF; ++Part) {
3847     Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
3848     if (Op != Instruction::ICmp && Op != Instruction::FCmp)
3849       // Floating point operations had to be 'fast' to enable the reduction.
3850       ReducedPartRdx = addFastMathFlag(
3851           Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart,
3852                               ReducedPartRdx, "bin.rdx"),
3853           RdxDesc.getFastMathFlags());
3854     else
3855       ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx,
3856                                       RdxPart);
3857   }
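       // For example (illustrative, UF = 2, integer add reduction):
       //   %bin.rdx = add <4 x i32> %rdx.part1, %rdx.part0
       // leaving a single vector to be reduced to a scalar below.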
3858 
3859   if (VF > 1) {
3860     bool NoNaN = Legal->hasFunNoNaNAttr();
3861     ReducedPartRdx =
3862         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN);
3863     // If the reduction can be performed in a smaller type, we need to extend
3864     // the reduction to the wider type before we branch to the original loop.
3865     if (Phi->getType() != RdxDesc.getRecurrenceType())
3866       ReducedPartRdx =
3867         RdxDesc.isSigned()
3868         ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
3869         : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
3870   }
3871 
3872   // Create a phi node that merges control-flow from the backedge-taken check
3873   // block and the middle block.
3874   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
3875                                         LoopScalarPreHeader->getTerminator());
3876   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
3877     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
3878   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
3879 
3880   // Now, we need to fix the users of the reduction variable
3881   // inside and outside of the scalar remainder loop.
3882   // We know that the loop is in LCSSA form. We need to update the
3883   // PHI nodes in the exit blocks.
3884   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3885     // All PHINodes need to have a single incoming edge, or two if
3886     // we already fixed them.
3887     assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
3888 
3889     // We found a reduction value exit-PHI. Update it with the
3890     // incoming bypass edge.
3891     if (LCSSAPhi.getIncomingValue(0) == LoopExitInst)
3892       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
3893   } // end of the LCSSA phi scan.
3894 
3895   // Fix the scalar loop reduction variable with the incoming reduction sum
3896   // from the vector body and from the backedge value.
3897   int IncomingEdgeBlockIdx =
3898     Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
3899   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
3900   // Pick the other block.
3901   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
3902   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
3903   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
3904 }
3905 
3906 void InnerLoopVectorizer::clearReductionWrapFlags(
3907     RecurrenceDescriptor &RdxDesc) {
3908   RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3909   if (RK != RecurrenceDescriptor::RK_IntegerAdd &&
3910       RK != RecurrenceDescriptor::RK_IntegerMult)
3911     return;
3912 
3913   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
3914   assert(LoopExitInstr && "null loop exit instruction");
3915   SmallVector<Instruction *, 8> Worklist;
3916   SmallPtrSet<Instruction *, 8> Visited;
3917   Worklist.push_back(LoopExitInstr);
3918   Visited.insert(LoopExitInstr);
3919 
3920   while (!Worklist.empty()) {
3921     Instruction *Cur = Worklist.pop_back_val();
3922     if (isa<OverflowingBinaryOperator>(Cur))
3923       for (unsigned Part = 0; Part < UF; ++Part) {
3924         Value *V = getOrCreateVectorValue(Cur, Part);
3925         cast<Instruction>(V)->dropPoisonGeneratingFlags();
3926       }
3927 
3928     for (User *U : Cur->users()) {
3929       Instruction *UI = cast<Instruction>(U);
3930       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
3931           Visited.insert(UI).second)
3932         Worklist.push_back(UI);
3933     }
3934   }
3935 }
3936 
3937 void InnerLoopVectorizer::fixLCSSAPHIs() {
3938   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3939     if (LCSSAPhi.getNumIncomingValues() == 1) {
3940       auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
3941       // Non-instruction incoming values are loop invariant and have only a
3942       // single value, so use lane zero.
3942       unsigned LastLane = 0;
3943       if (isa<Instruction>(IncomingValue))
3944           LastLane = Cost->isUniformAfterVectorization(
3945                          cast<Instruction>(IncomingValue), VF)
3946                          ? 0
3947                          : VF - 1;
3948       // Can be a loop invariant incoming value or the last scalar value to be
3949       // extracted from the vectorized loop.
3950       Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3951       Value *lastIncomingValue =
3952           getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane });
3953       LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
3954     }
3955   }
3956 }
3957 
3958 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
3959   // The basic block and loop containing the predicated instruction.
3960   auto *PredBB = PredInst->getParent();
3961   auto *VectorLoop = LI->getLoopFor(PredBB);
3962 
3963   // Initialize a worklist with the operands of the predicated instruction.
3964   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
3965 
3966   // Holds instructions that we need to analyze again. An instruction may be
3967   // reanalyzed if we don't yet know if we can sink it or not.
3968   SmallVector<Instruction *, 8> InstsToReanalyze;
3969 
3970   // Returns true if a given use occurs in the predicated block. Phi nodes use
3971   // their operands in their corresponding predecessor blocks.
3972   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
3973     auto *I = cast<Instruction>(U.getUser());
3974     BasicBlock *BB = I->getParent();
3975     if (auto *Phi = dyn_cast<PHINode>(I))
3976       BB = Phi->getIncomingBlock(
3977           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
3978     return BB == PredBB;
3979   };
3980 
3981   // Iteratively sink the scalarized operands of the predicated instruction
3982   // into the block we created for it. When an instruction is sunk, its
3983   // operands are then added to the worklist. The algorithm ends after one pass
3984   // through the worklist fails to sink a single instruction.
3985   bool Changed;
3986   do {
3987     // Add the instructions that need to be reanalyzed to the worklist, and
3988     // reset the changed indicator.
3989     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
3990     InstsToReanalyze.clear();
3991     Changed = false;
3992 
3993     while (!Worklist.empty()) {
3994       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
3995 
3996       // We can't sink an instruction if it is a phi node, is already in the
3997       // predicated block, is not in the loop, or may have side effects.
3998       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
3999           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4000         continue;
4001 
4002       // It's legal to sink the instruction if all its uses occur in the
4003       // predicated block. Otherwise, there's nothing to do yet, and we may
4004       // need to reanalyze the instruction.
4005       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4006         InstsToReanalyze.push_back(I);
4007         continue;
4008       }
4009 
4010       // Move the instruction to the beginning of the predicated block, and add
4011       // its operands to the worklist.
4012       I->moveBefore(&*PredBB->getFirstInsertionPt());
4013       Worklist.insert(I->op_begin(), I->op_end());
4014 
4015       // The sinking may have enabled other instructions to be sunk, so we will
4016       // need to iterate.
4017       Changed = true;
4018     }
4019   } while (Changed);
4020 }
4021 
4022 void InnerLoopVectorizer::fixNonInductionPHIs() {
4023   for (PHINode *OrigPhi : OrigPHIsToFix) {
4024     PHINode *NewPhi =
4025         cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0));
4026     unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();
4027 
4028     SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
4029         predecessors(OrigPhi->getParent()));
4030     SmallVector<BasicBlock *, 2> VectorBBPredecessors(
4031         predecessors(NewPhi->getParent()));
4032     assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
4033            "Scalar and Vector BB should have the same number of predecessors");
4034 
4035     // The insertion point in Builder may be invalidated by the time we get
4036     // here. Force the Builder insertion point to something valid so that we do
4037     // not run into issues during insertion point restore in
4038     // getOrCreateVectorValue calls below.
4039     Builder.SetInsertPoint(NewPhi);
4040 
4041     // The predecessor order is preserved and we can rely on mapping between
4042     // scalar and vector block predecessors.
4043     for (unsigned i = 0; i < NumIncomingValues; ++i) {
4044       BasicBlock *NewPredBB = VectorBBPredecessors[i];
4045 
4046       // When looking up the new scalar/vector values to fix up, use incoming
4047       // values from original phi.
4048       Value *ScIncV =
4049           OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]);
4050 
4051       // The scalar incoming value may need a broadcast.
4052       Value *NewIncV = getOrCreateVectorValue(ScIncV, 0);
4053       NewPhi->addIncoming(NewIncV, NewPredBB);
4054     }
4055   }
4056 }
4057 
4058 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, unsigned UF,
4059                                    unsigned VF, bool IsPtrLoopInvariant,
4060                                    SmallBitVector &IsIndexLoopInvariant) {
4061   // Construct a vector GEP by widening the operands of the scalar GEP as
4062   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4063   // results in a vector of pointers when at least one operand of the GEP
4064   // is vector-typed. Thus, to keep the representation compact, we only use
4065   // vector-typed operands for loop-varying values.
4066 
4067   if (VF > 1 && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4068     // If we are vectorizing, but the GEP has only loop-invariant operands,
4069     // the GEP we build (by only using vector-typed operands for
4070     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4071     // produce a vector of pointers, we need to either arbitrarily pick an
4072     // operand to broadcast, or broadcast a clone of the original GEP.
4073     // Here, we broadcast a clone of the original.
4074     //
4075     // TODO: If at some point we decide to scalarize instructions having
4076     //       loop-invariant operands, this special case will no longer be
4077     //       required. We would add the scalarization decision to
4078     //       collectLoopScalars() and teach getVectorValue() to broadcast
4079     //       the lane-zero scalar value.
4080     auto *Clone = Builder.Insert(GEP->clone());
4081     for (unsigned Part = 0; Part < UF; ++Part) {
4082       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4083       VectorLoopValueMap.setVectorValue(GEP, Part, EntryPart);
4084       addMetadata(EntryPart, GEP);
4085     }
4086   } else {
4087     // If the GEP has at least one loop-varying operand, we are sure to
4088     // produce a vector of pointers. But if we are only unrolling, we want
4089     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4090     // produce with the code below will be scalar (if VF == 1) or vector
4091     // (otherwise). Note that for the unroll-only case, we still maintain
4092     // values in the vector mapping (via setVectorValue), as we do for other
4093     // instructions.
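         // For example (illustrative, VF = 4), a loop-invariant base pointer
         // with a loop-varying index produces something like:
         //   %gep = getelementptr inbounds i32, i32* %base, <4 x i64> %vec.idx
         // whereas with VF == 1 and UF > 1 each part gets its own scalar GEP.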
4094     for (unsigned Part = 0; Part < UF; ++Part) {
4095       // The pointer operand of the new GEP. If it's loop-invariant, we
4096       // won't broadcast it.
4097       auto *Ptr = IsPtrLoopInvariant
4098                       ? GEP->getPointerOperand()
4099                       : getOrCreateVectorValue(GEP->getPointerOperand(), Part);
4100 
4101       // Collect all the indices for the new GEP. If any index is
4102       // loop-invariant, we won't broadcast it.
4103       SmallVector<Value *, 4> Indices;
4104       for (auto Index : enumerate(GEP->indices())) {
4105         Value *User = Index.value().get();
4106         if (IsIndexLoopInvariant[Index.index()])
4107           Indices.push_back(User);
4108         else
4109           Indices.push_back(getOrCreateVectorValue(User, Part));
4110       }
4111 
4112       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4113       // but it should be a vector, otherwise.
4114       auto *NewGEP =
4115           GEP->isInBounds()
4116               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4117                                           Indices)
4118               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4119       assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
4120              "NewGEP is not a pointer vector");
4121       VectorLoopValueMap.setVectorValue(GEP, Part, NewGEP);
4122       addMetadata(NewGEP, GEP);
4123     }
4124   }
4125 }
4126 
4127 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
4128                                               unsigned VF) {
4129   PHINode *P = cast<PHINode>(PN);
4130   if (EnableVPlanNativePath) {
4131     // Currently we enter here in the VPlan-native path for non-induction
4132     // PHIs where all control flow is uniform. We simply widen these PHIs.
4133     // Create a vector phi with no operands - the vector phi operands will be
4134     // set at the end of vector code generation.
4135     Type *VecTy =
4136         (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4137     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4138     VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
4139     OrigPHIsToFix.push_back(P);
4140 
4141     return;
4142   }
4143 
4144   assert(PN->getParent() == OrigLoop->getHeader() &&
4145          "Non-header phis should have been handled elsewhere");
4146 
4147   // In order to support recurrences we need to be able to vectorize Phi nodes.
4148   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4149   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4150   // this value when we vectorize all of the instructions that use the PHI.
4151   if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4152     for (unsigned Part = 0; Part < UF; ++Part) {
4153       // This is phase one of vectorizing PHIs.
4154       Type *VecTy =
4155           (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4156       Value *EntryPart = PHINode::Create(
4157           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4158       VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
4159     }
4160     return;
4161   }
4162 
4163   setDebugLocFromInst(Builder, P);
4164 
4165   // This PHINode must be an induction variable.
4166   // Make sure that we know about it.
4167   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4168 
4169   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4170   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4171 
4172   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4173   // which can be found from the original scalar operations.
4174   switch (II.getKind()) {
4175   case InductionDescriptor::IK_NoInduction:
4176     llvm_unreachable("Unknown induction");
4177   case InductionDescriptor::IK_IntInduction:
4178   case InductionDescriptor::IK_FpInduction:
4179     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4180   case InductionDescriptor::IK_PtrInduction: {
4181     // Handle the pointer induction variable case.
4182     assert(P->getType()->isPointerTy() && "Unexpected type.");
4183     // This is the normalized GEP that starts counting at zero.
4184     Value *PtrInd = Induction;
4185     PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
4186     // Determine the number of scalars we need to generate for each unroll
4187     // iteration. If the instruction is uniform, we only need to generate the
4188     // first lane. Otherwise, we generate all VF values.
4189     unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
4190     // These are the scalar results. Notice that we don't generate vector GEPs
4191     // because scalar GEPs result in better code.
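         // For example (illustrative, VF = 4, UF = 2, non-uniform), part 0
         // generates "next.gep" values for indices i+0..i+3 and part 1 for
         // indices i+4..i+7.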
4192     for (unsigned Part = 0; Part < UF; ++Part) {
4193       for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4194         Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
4195         Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4196         Value *SclrGep =
4197             emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4198         SclrGep->setName("next.gep");
4199         VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
4200       }
4201     }
4202     return;
4203   }
4204   }
4205 }
4206 
4207 /// A helper function for checking whether an integer division-related
4208 /// instruction may divide by zero (in which case it must be predicated if
4209 /// executed conditionally in the scalar code).
4210 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4211 /// Non-zero divisors that are not compile-time constants will not be
4212 /// converted into multiplication, so we will still end up scalarizing
4213 /// the division, but can do so w/o predication.
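     /// For example (illustrative), "udiv i32 %x, 4" cannot divide by zero,
     /// whereas "udiv i32 %x, %n" with a non-constant %n is conservatively
     /// assumed to.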
4214 static bool mayDivideByZero(Instruction &I) {
4215   assert((I.getOpcode() == Instruction::UDiv ||
4216           I.getOpcode() == Instruction::SDiv ||
4217           I.getOpcode() == Instruction::URem ||
4218           I.getOpcode() == Instruction::SRem) &&
4219          "Unexpected instruction");
4220   Value *Divisor = I.getOperand(1);
4221   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4222   return !CInt || CInt->isZero();
4223 }
4224 
4225 void InnerLoopVectorizer::widenInstruction(Instruction &I) {
4226   switch (I.getOpcode()) {
4227   case Instruction::Call:
4228   case Instruction::Br:
4229   case Instruction::PHI:
4230   case Instruction::GetElementPtr:
4231   case Instruction::Select:
4232     llvm_unreachable("This instruction is handled by a different recipe.");
4233   case Instruction::UDiv:
4234   case Instruction::SDiv:
4235   case Instruction::SRem:
4236   case Instruction::URem:
4237   case Instruction::Add:
4238   case Instruction::FAdd:
4239   case Instruction::Sub:
4240   case Instruction::FSub:
4241   case Instruction::FNeg:
4242   case Instruction::Mul:
4243   case Instruction::FMul:
4244   case Instruction::FDiv:
4245   case Instruction::FRem:
4246   case Instruction::Shl:
4247   case Instruction::LShr:
4248   case Instruction::AShr:
4249   case Instruction::And:
4250   case Instruction::Or:
4251   case Instruction::Xor: {
4252     // Just widen unops and binops.
4253     setDebugLocFromInst(Builder, &I);
4254 
4255     for (unsigned Part = 0; Part < UF; ++Part) {
4256       SmallVector<Value *, 2> Ops;
4257       for (Value *Op : I.operands())
4258         Ops.push_back(getOrCreateVectorValue(Op, Part));
4259 
4260       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4261 
4262       if (auto *VecOp = dyn_cast<Instruction>(V))
4263         VecOp->copyIRFlags(&I);
4264 
4265       // Use this vector value for all users of the original instruction.
4266       VectorLoopValueMap.setVectorValue(&I, Part, V);
4267       addMetadata(V, &I);
4268     }
4269 
4270     break;
4271   }
4272   case Instruction::ICmp:
4273   case Instruction::FCmp: {
4274     // Widen compares. Generate vector compares.
4275     bool FCmp = (I.getOpcode() == Instruction::FCmp);
4276     auto *Cmp = cast<CmpInst>(&I);
4277     setDebugLocFromInst(Builder, Cmp);
4278     for (unsigned Part = 0; Part < UF; ++Part) {
4279       Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part);
4280       Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part);
4281       Value *C = nullptr;
4282       if (FCmp) {
4283         // Propagate fast math flags.
4284         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
4285         Builder.setFastMathFlags(Cmp->getFastMathFlags());
4286         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
4287       } else {
4288         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
4289       }
4290       VectorLoopValueMap.setVectorValue(&I, Part, C);
4291       addMetadata(C, &I);
4292     }
4293 
4294     break;
4295   }
4296 
4297   case Instruction::ZExt:
4298   case Instruction::SExt:
4299   case Instruction::FPToUI:
4300   case Instruction::FPToSI:
4301   case Instruction::FPExt:
4302   case Instruction::PtrToInt:
4303   case Instruction::IntToPtr:
4304   case Instruction::SIToFP:
4305   case Instruction::UIToFP:
4306   case Instruction::Trunc:
4307   case Instruction::FPTrunc:
4308   case Instruction::BitCast: {
4309     auto *CI = cast<CastInst>(&I);
4310     setDebugLocFromInst(Builder, CI);
4311 
4312     /// Vectorize casts.
4313     Type *DestTy =
4314         (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
4315 
4316     for (unsigned Part = 0; Part < UF; ++Part) {
4317       Value *A = getOrCreateVectorValue(CI->getOperand(0), Part);
4318       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4319       VectorLoopValueMap.setVectorValue(&I, Part, Cast);
4320       addMetadata(Cast, &I);
4321     }
4322     break;
4323   }
4324   default:
4325     // This instruction is not vectorized by simple widening.
4326     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4327     llvm_unreachable("Unhandled instruction!");
4328   } // end of switch.
4329 }
4330 
4331 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPUser &ArgOperands,
4332                                                VPTransformState &State) {
4333   assert(!isa<DbgInfoIntrinsic>(I) &&
4334          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4335   setDebugLocFromInst(Builder, &I);
4336 
4337   Module *M = I.getParent()->getParent()->getParent();
4338   auto *CI = cast<CallInst>(&I);
4339 
4340   SmallVector<Type *, 4> Tys;
4341   for (Value *ArgOperand : CI->arg_operands())
4342     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
4343 
4344   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4345 
4346   // The flag shows whether we use an intrinsic or a plain call for the
4347   // vectorized version of the instruction, i.e. whether it is more
4348   // beneficial to perform the intrinsic call than a library call.
4349   bool NeedToScalarize = false;
4350   unsigned CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4351   bool UseVectorIntrinsic =
4352       ID && Cost->getVectorIntrinsicCost(CI, VF) <= CallCost;
4353   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4354          "Instruction should be scalarized elsewhere.");
4355 
4356   for (unsigned Part = 0; Part < UF; ++Part) {
4357     SmallVector<Value *, 4> Args;
4358     for (auto &I : enumerate(ArgOperands.operands())) {
4359       // Some intrinsics have a scalar argument - don't replace it with a
4360       // vector.
4361       Value *Arg;
4362       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4363         Arg = State.get(I.value(), Part);
4364       else
4365         Arg = State.get(I.value(), {0, 0});
4366       Args.push_back(Arg);
4367     }
4368 
4369     Function *VectorF;
4370     if (UseVectorIntrinsic) {
4371       // Use vector version of the intrinsic.
4372       Type *TysForDecl[] = {CI->getType()};
4373       if (VF > 1)
4374         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4375       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4376     } else {
4377       // Use vector version of the function call.
4378       const VFShape Shape =
4379           VFShape::get(*CI, {VF, false} /*EC*/, false /*HasGlobalPred*/);
4380 #ifndef NDEBUG
4381       const SmallVector<VFInfo, 8> Infos = VFDatabase::getMappings(*CI);
4382       assert(std::find_if(Infos.begin(), Infos.end(),
4383                           [&Shape](const VFInfo &Info) {
4384                             return Info.Shape == Shape;
4385                           }) != Infos.end() &&
4386              "Vector function shape is missing from the database.");
4387 #endif
4388       VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
4389     }
4390     assert(VectorF && "Can't create vector function.");
4391 
4392     SmallVector<OperandBundleDef, 1> OpBundles;
4393     CI->getOperandBundlesAsDefs(OpBundles);
4394     CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4395 
4396     if (isa<FPMathOperator>(V))
4397       V->copyFastMathFlags(CI);
4398 
4399     VectorLoopValueMap.setVectorValue(&I, Part, V);
4400     addMetadata(V, &I);
4401   }
4402 }
4403 
4404 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I,
4405                                                  bool InvariantCond) {
4406   setDebugLocFromInst(Builder, &I);
4407 
4408   // The condition can be loop invariant but still defined inside the
4409   // loop. This means that we can't just use the original 'cond' value.
4410   // We have to take the 'vectorized' value and pick the first lane.
4411   // Instcombine will make this a no-op.
4412 
4413   auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0});
4414 
4415   for (unsigned Part = 0; Part < UF; ++Part) {
4416     Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part);
4417     Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part);
4418     Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part);
4419     Value *Sel =
4420         Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1);
4421     VectorLoopValueMap.setVectorValue(&I, Part, Sel);
4422     addMetadata(Sel, &I);
4423   }
4424 }
4425 
4426 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
4427   // We should not collect Scalars more than once per VF. Right now, this
4428   // function is called from collectUniformsAndScalars(), which already does
4429   // this check. Collecting Scalars for VF=1 does not make any sense.
4430   assert(VF >= 2 && Scalars.find(VF) == Scalars.end() &&
4431          "This function should not be visited twice for the same VF");
4432 
4433   SmallSetVector<Instruction *, 8> Worklist;
4434 
4435   // These sets are used to seed the analysis with pointers used by memory
4436   // accesses that will remain scalar.
4437   SmallSetVector<Instruction *, 8> ScalarPtrs;
4438   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4439 
4440   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4441   // The pointer operands of loads and stores will be scalar as long as the
4442   // memory access is not a gather or scatter operation. The value operand of a
4443   // store will remain scalar if the store is scalarized.
4444   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4445     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4446     assert(WideningDecision != CM_Unknown &&
4447            "Widening decision should be ready at this moment");
4448     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4449       if (Ptr == Store->getValueOperand())
4450         return WideningDecision == CM_Scalarize;
4451     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4452            "Ptr is neither a value or pointer operand");
4453     return WideningDecision != CM_GatherScatter;
4454   };
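       // For example (illustrative), for a consecutive widened store the
       // pointer-operand use is scalar but the value-operand use is not; for a
       // scatter neither use is scalar; for a scalarized store both are.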
4455 
4456   // A helper that returns true if the given value is a bitcast or
4457   // getelementptr instruction contained in the loop.
4458   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4459     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4460             isa<GetElementPtrInst>(V)) &&
4461            !TheLoop->isLoopInvariant(V);
4462   };
4463 
4464   // A helper that evaluates a memory access's use of a pointer. If the use
4465   // will be a scalar use, and the pointer is only used by memory accesses, we
4466   // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4467   // PossibleNonScalarPtrs.
4468   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4469     // We only care about bitcast and getelementptr instructions contained in
4470     // the loop.
4471     if (!isLoopVaryingBitCastOrGEP(Ptr))
4472       return;
4473 
4474     // If the pointer has already been identified as scalar (e.g., if it was
4475     // also identified as uniform), there's nothing to do.
4476     auto *I = cast<Instruction>(Ptr);
4477     if (Worklist.count(I))
4478       return;
4479 
4480     // If the use of the pointer will be a scalar use, and all users of the
4481     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4482     // place the pointer in PossibleNonScalarPtrs.
4483     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4484           return isa<LoadInst>(U) || isa<StoreInst>(U);
4485         }))
4486       ScalarPtrs.insert(I);
4487     else
4488       PossibleNonScalarPtrs.insert(I);
4489   };
4490 
4491   // We seed the scalars analysis with three classes of instructions: (1)
4492   // instructions marked uniform-after-vectorization, (2) bitcast and
4493   // getelementptr instructions used by memory accesses requiring a scalar use,
4494   // and (3) pointer induction variables and their update instructions (we
4495   // currently only scalarize these).
4496   //
4497   // (1) Add to the worklist all instructions that have been identified as
4498   // uniform-after-vectorization.
4499   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4500 
4501   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4502   // memory accesses requiring a scalar use. The pointer operands of loads and
4503   // stores will be scalar as long as the memory access is not a gather or
4504   // scatter operation. The value operand of a store will remain scalar if the
4505   // store is scalarized.
4506   for (auto *BB : TheLoop->blocks())
4507     for (auto &I : *BB) {
4508       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4509         evaluatePtrUse(Load, Load->getPointerOperand());
4510       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4511         evaluatePtrUse(Store, Store->getPointerOperand());
4512         evaluatePtrUse(Store, Store->getValueOperand());
4513       }
4514     }
4515   for (auto *I : ScalarPtrs)
4516     if (PossibleNonScalarPtrs.find(I) == PossibleNonScalarPtrs.end()) {
4517       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4518       Worklist.insert(I);
4519     }
4520 
4521   // (3) Add to the worklist all pointer induction variables and their update
4522   // instructions.
4523   //
4524   // TODO: Once we are able to vectorize pointer induction variables we should
4525   //       no longer insert them into the worklist here.
4526   auto *Latch = TheLoop->getLoopLatch();
4527   for (auto &Induction : Legal->getInductionVars()) {
4528     auto *Ind = Induction.first;
4529     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4530     if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction)
4531       continue;
4532     Worklist.insert(Ind);
4533     Worklist.insert(IndUpdate);
4534     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4535     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4536                       << "\n");
4537   }
4538 
4539   // Insert the forced scalars.
4540   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4541   // induction variable when the PHI user is scalarized.
4542   auto ForcedScalar = ForcedScalars.find(VF);
4543   if (ForcedScalar != ForcedScalars.end())
4544     for (auto *I : ForcedScalar->second)
4545       Worklist.insert(I);
4546 
4547   // Expand the worklist by looking through any bitcasts and getelementptr
4548   // instructions we've already identified as scalar. This is similar to the
4549   // expansion step in collectLoopUniforms(); however, here we're only
4550   // expanding to include additional bitcasts and getelementptr instructions.
4551   unsigned Idx = 0;
4552   while (Idx != Worklist.size()) {
4553     Instruction *Dst = Worklist[Idx++];
4554     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4555       continue;
4556     auto *Src = cast<Instruction>(Dst->getOperand(0));
4557     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4558           auto *J = cast<Instruction>(U);
4559           return !TheLoop->contains(J) || Worklist.count(J) ||
4560                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4561                   isScalarUse(J, Src));
4562         })) {
4563       Worklist.insert(Src);
4564       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4565     }
4566   }
4567 
4568   // An induction variable will remain scalar if all users of the induction
4569   // variable and induction variable update remain scalar.
4570   for (auto &Induction : Legal->getInductionVars()) {
4571     auto *Ind = Induction.first;
4572     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4573 
4574     // We already considered pointer induction variables, so there's no reason
4575     // to look at their users again.
4576     //
4577     // TODO: Once we are able to vectorize pointer induction variables we
4578     //       should no longer skip over them here.
4579     if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction)
4580       continue;
4581 
4582     // Determine if all users of the induction variable are scalar after
4583     // vectorization.
4584     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4585       auto *I = cast<Instruction>(U);
4586       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
4587     });
4588     if (!ScalarInd)
4589       continue;
4590 
4591     // Determine if all users of the induction variable update instruction are
4592     // scalar after vectorization.
4593     auto ScalarIndUpdate =
4594         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4595           auto *I = cast<Instruction>(U);
4596           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
4597         });
4598     if (!ScalarIndUpdate)
4599       continue;
4600 
4601     // The induction variable and its update instruction will remain scalar.
4602     Worklist.insert(Ind);
4603     Worklist.insert(IndUpdate);
4604     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4605     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4606                       << "\n");
4607   }
4608 
4609   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4610 }
4611 
4612 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, unsigned VF) {
4613   if (!blockNeedsPredication(I->getParent()))
4614     return false;
4615   switch (I->getOpcode()) {
4616   default:
4617     break;
4618   case Instruction::Load:
4619   case Instruction::Store: {
4620     if (!Legal->isMaskRequired(I))
4621       return false;
4622     auto *Ptr = getLoadStorePointerOperand(I);
4623     auto *Ty = getMemInstValueType(I);
4624     // We have already decided how to vectorize this instruction, get that
4625     // result.
4626     if (VF > 1) {
4627       InstWidening WideningDecision = getWideningDecision(I, VF);
4628       assert(WideningDecision != CM_Unknown &&
4629              "Widening decision should be ready at this moment");
4630       return WideningDecision == CM_Scalarize;
4631     }
4632     const MaybeAlign Alignment = getLoadStoreAlignment(I);
4633     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4634                                 isLegalMaskedGather(Ty, Alignment))
4635                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4636                                 isLegalMaskedScatter(Ty, Alignment));
4637   }
4638   case Instruction::UDiv:
4639   case Instruction::SDiv:
4640   case Instruction::SRem:
4641   case Instruction::URem:
4642     return mayDivideByZero(*I);
4643   }
4644   return false;
4645 }
4646 
4647 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I,
4648                                                                unsigned VF) {
4649   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
4650   assert(getWideningDecision(I, VF) == CM_Unknown &&
4651          "Decision should not be set yet.");
4652   auto *Group = getInterleavedAccessGroup(I);
4653   assert(Group && "Must have a group.");
4654 
4655   // If the instruction's allocated size doesn't equal its type size, it
4656   // requires padding and will be scalarized.
4657   auto &DL = I->getModule()->getDataLayout();
4658   auto *ScalarTy = getMemInstValueType(I);
4659   if (hasIrregularType(ScalarTy, DL, VF))
4660     return false;
4661 
4662   // Check if masking is required.
4663   // A Group may need masking for one of two reasons: it resides in a block that
4664   // needs predication, or it was decided to use masking to deal with gaps.
4665   bool PredicatedAccessRequiresMasking =
4666       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
4667   bool AccessWithGapsRequiresMasking =
4668       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
4669   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
4670     return true;
4671 
4672   // If masked interleaving is required, we expect that the user/target had
4673   // enabled it, because otherwise it either wouldn't have been created or
4674   // it should have been invalidated by the CostModel.
4675   assert(useMaskedInterleavedAccesses(TTI) &&
4676          "Masked interleave-groups for predicated accesses are not enabled.");
4677 
4678   auto *Ty = getMemInstValueType(I);
4679   const MaybeAlign Alignment = getLoadStoreAlignment(I);
4680   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4681                           : TTI.isLegalMaskedStore(Ty, Alignment);
4682 }
4683 
4684 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I,
4685                                                                unsigned VF) {
4686   // Get and ensure we have a valid memory instruction.
4687   LoadInst *LI = dyn_cast<LoadInst>(I);
4688   StoreInst *SI = dyn_cast<StoreInst>(I);
4689   assert((LI || SI) && "Invalid memory instruction");
4690 
4691   auto *Ptr = getLoadStorePointerOperand(I);
4692 
4693   // In order to be widened, the pointer should be consecutive, first of all.
4694   if (!Legal->isConsecutivePtr(Ptr))
4695     return false;
4696 
4697   // If the instruction is a store located in a predicated block, it will be
4698   // scalarized.
4699   if (isScalarWithPredication(I))
4700     return false;
4701 
4702   // If the instruction's allocated size doesn't equal its type size, it
4703   // requires padding and will be scalarized.
4704   auto &DL = I->getModule()->getDataLayout();
4705   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
4706   if (hasIrregularType(ScalarTy, DL, VF))
4707     return false;
4708 
4709   return true;
4710 }
4711 
4712 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
4713   // We should not collect Uniforms more than once per VF. Right now,
4714   // this function is called from collectUniformsAndScalars(), which
4715   // already does this check. Collecting Uniforms for VF=1 does not make any
4716   // sense.
4717 
4718   assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() &&
4719          "This function should not be visited twice for the same VF");
4720 
4721   // Initialize the entry for this VF: even if we find no uniform values, we
4722   // will not analyze it again, since Uniforms.count(VF) will return 1.
4723   Uniforms[VF].clear();
4724 
4725   // We now know that the loop is vectorizable!
4726   // Collect instructions inside the loop that will remain uniform after
4727   // vectorization.
4728 
4729   // Global values, params and instructions outside of current loop are out of
4730   // scope.
4731   auto isOutOfScope = [&](Value *V) -> bool {
4732     Instruction *I = dyn_cast<Instruction>(V);
4733     return (!I || !TheLoop->contains(I));
4734   };
4735 
4736   SetVector<Instruction *> Worklist;
4737   BasicBlock *Latch = TheLoop->getLoopLatch();
4738 
4739   // Instructions that are scalar with predication must not be considered
4740   // uniform after vectorization, because that would create an erroneous
4741   // replicating region where only a single instance out of VF should be formed.
4742   // TODO: optimize such rare cases if found important, see PR40816.
4743   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
4744     if (isScalarWithPredication(I, VF)) {
4745       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
4746                         << *I << "\n");
4747       return;
4748     }
4749     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
4750     Worklist.insert(I);
4751   };
4752 
4753   // Start with the conditional branch. If the branch condition is an
4754   // instruction contained in the loop that is only used by the branch, it is
4755   // uniform.
4756   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4757   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
4758     addToWorklistIfAllowed(Cmp);
4759 
4760   // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
4761   // are pointers that are treated like consecutive pointers during
4762   // vectorization. The pointer operands of interleaved accesses are an
4763   // example.
4764   SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
4765 
4766   // Holds pointer operands of instructions that are possibly non-uniform.
4767   SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
4768 
4769   auto isUniformDecision = [&](Instruction *I, unsigned VF) {
4770     InstWidening WideningDecision = getWideningDecision(I, VF);
4771     assert(WideningDecision != CM_Unknown &&
4772            "Widening decision should be ready at this moment");
4773 
4774     return (WideningDecision == CM_Widen ||
4775             WideningDecision == CM_Widen_Reverse ||
4776             WideningDecision == CM_Interleave);
4777   };
4778   // Iterate over the instructions in the loop, and collect all
4779   // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
4780   // that a consecutive-like pointer operand will be scalarized, we collect it
4781   // in PossibleNonUniformPtrs instead. We use two sets here because a single
4782   // getelementptr instruction can be used by both vectorized and scalarized
4783   // memory instructions. For example, if a loop loads and stores from the same
4784   // location, but the store is conditional, the store will be scalarized, and
4785   // the getelementptr won't remain uniform.
4786   for (auto *BB : TheLoop->blocks())
4787     for (auto &I : *BB) {
4788       // If there's no pointer operand, there's nothing to do.
4789       auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
4790       if (!Ptr)
4791         continue;
4792 
4793       // True if all users of Ptr are memory accesses that have Ptr as their
4794       // pointer operand.
4795       auto UsersAreMemAccesses =
4796           llvm::all_of(Ptr->users(), [&](User *U) -> bool {
4797             return getLoadStorePointerOperand(U) == Ptr;
4798           });
4799 
4800       // Ensure the memory instruction will not be scalarized or used by
4801       // gather/scatter, making its pointer operand non-uniform. If the pointer
4802       // operand is used by any instruction other than a memory access, we
4803       // conservatively assume the pointer operand may be non-uniform.
4804       if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
4805         PossibleNonUniformPtrs.insert(Ptr);
4806 
4807       // If the memory instruction will be vectorized and its pointer operand
4808       // is consecutive-like, or interleaving - the pointer operand should
4809       // remain uniform.
4810       else
4811         ConsecutiveLikePtrs.insert(Ptr);
4812     }
4813 
4814   // Add to the Worklist all consecutive and consecutive-like pointers that
4815   // aren't also identified as possibly non-uniform.
4816   for (auto *V : ConsecutiveLikePtrs)
4817     if (PossibleNonUniformPtrs.find(V) == PossibleNonUniformPtrs.end())
4818       addToWorklistIfAllowed(V);
4819 
4820   // Expand Worklist in topological order: whenever a new instruction
4821   // is added, its users should already be inside Worklist. This ensures
4822   // that a uniform instruction will only be used by uniform instructions.
4823   unsigned idx = 0;
4824   while (idx != Worklist.size()) {
4825     Instruction *I = Worklist[idx++];
4826 
4827     for (auto OV : I->operand_values()) {
4828       // isOutOfScope operands cannot be uniform instructions.
4829       if (isOutOfScope(OV))
4830         continue;
4831       // First-order recurrence phis should typically be considered
4832       // non-uniform.
4833       auto *OP = dyn_cast<PHINode>(OV);
4834       if (OP && Legal->isFirstOrderRecurrence(OP))
4835         continue;
4836       // If all the users of the operand are uniform, then add the
4837       // operand into the uniform worklist.
4838       auto *OI = cast<Instruction>(OV);
4839       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
4840             auto *J = cast<Instruction>(U);
4841             return Worklist.count(J) ||
4842                    (OI == getLoadStorePointerOperand(J) &&
4843                     isUniformDecision(J, VF));
4844           }))
4845         addToWorklistIfAllowed(OI);
4846     }
4847   }
4848 
4849   // Returns true if Ptr is the pointer operand of a memory access instruction
4850   // I, and I is known to not require scalarization.
4851   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
4852     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
4853   };
4854 
4855   // For an instruction to be added into Worklist above, all its users inside
4856   // the loop should also be in Worklist. However, this condition cannot be
4857   // true for phi nodes that form a cyclic dependence. We must process phi
4858   // nodes separately. An induction variable will remain uniform if all users
4859   // of the induction variable and induction variable update remain uniform.
4860   // The code below handles both pointer and non-pointer induction variables.
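  // As a rough illustration, consider a loop of the form:
  //
  //   for (int i = 0; i < n; ++i)
  //     a[i] = x;
  //
  // The only in-loop users of i are the consecutive store address and the
  // update i + 1, and the only users of the update are the phi and the latch
  // compare, so both the induction variable and its update remain uniform.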
4861   for (auto &Induction : Legal->getInductionVars()) {
4862     auto *Ind = Induction.first;
4863     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4864 
4865     // Determine if all users of the induction variable are uniform after
4866     // vectorization.
4867     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4868       auto *I = cast<Instruction>(U);
4869       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4870              isVectorizedMemAccessUse(I, Ind);
4871     });
4872     if (!UniformInd)
4873       continue;
4874 
4875     // Determine if all users of the induction variable update instruction are
4876     // uniform after vectorization.
4877     auto UniformIndUpdate =
4878         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4879           auto *I = cast<Instruction>(U);
4880           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4881                  isVectorizedMemAccessUse(I, IndUpdate);
4882         });
4883     if (!UniformIndUpdate)
4884       continue;
4885 
4886     // The induction variable and its update instruction will remain uniform.
4887     addToWorklistIfAllowed(Ind);
4888     addToWorklistIfAllowed(IndUpdate);
4889   }
4890 
4891   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
4892 }
4893 
4894 bool LoopVectorizationCostModel::runtimeChecksRequired() {
4895   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
4896 
4897   if (Legal->getRuntimePointerChecking()->Need) {
4898     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
4899         "runtime pointer checks needed. Enable vectorization of this "
4900         "loop with '#pragma clang loop vectorize(enable)' when "
4901         "compiling with -Os/-Oz",
4902         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4903     return true;
4904   }
4905 
4906   if (!PSE.getUnionPredicate().getPredicates().empty()) {
4907     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
4908         "runtime SCEV checks needed. Enable vectorization of this "
4909         "loop with '#pragma clang loop vectorize(enable)' when "
4910         "compiling with -Os/-Oz",
4911         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4912     return true;
4913   }
4914 
4915   // FIXME: Avoid specializing for stride==1 instead of bailing out.
4916   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
4917     reportVectorizationFailure("Runtime stride check is required with -Os/-Oz",
4918         "runtime stride == 1 checks needed. Enable vectorization of "
4919         "this loop with '#pragma clang loop vectorize(enable)' when "
4920         "compiling with -Os/-Oz",
4921         "CantVersionLoopWithOptForSize", ORE, TheLoop);
4922     return true;
4923   }
4924 
4925   return false;
4926 }
4927 
4928 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF() {
4929   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
4930     // TODO: It may be useful to do this, since it's still likely to be
4931     // dynamically uniform if the target can skip.
4932     reportVectorizationFailure(
4933         "Not inserting runtime ptr check for divergent target",
4934         "runtime pointer checks needed. Not enabled for divergent target",
4935         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
4936     return None;
4937   }
4938 
4939   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
4940   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
4941   if (TC == 1) {
4942     reportVectorizationFailure("Single iteration (non) loop",
4943         "loop trip count is one, irrelevant for vectorization",
4944         "SingleIterationLoop", ORE, TheLoop);
4945     return None;
4946   }
4947 
4948   switch (ScalarEpilogueStatus) {
4949   case CM_ScalarEpilogueAllowed:
4950     return computeFeasibleMaxVF(TC);
4951   case CM_ScalarEpilogueNotNeededUsePredicate:
4952     LLVM_DEBUG(
4953         dbgs() << "LV: vector predicate hint/switch found.\n"
4954                << "LV: Not allowing scalar epilogue, creating predicated "
4955                << "vector loop.\n");
4956     break;
4957   case CM_ScalarEpilogueNotAllowedLowTripLoop:
4958     // fallthrough as a special case of OptForSize
4959   case CM_ScalarEpilogueNotAllowedOptSize:
4960     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
4961       LLVM_DEBUG(
4962           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
4963     else
4964       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
4965                         << "count.\n");
4966 
4967     // Bail if runtime checks are required, which are not good when optimizing
4968     // for size.
4969     if (runtimeChecksRequired())
4970       return None;
4971     break;
4972   }
4973 
4974   // Now try folding the tail by masking.
4975 
4976   // Invalidate interleave groups that require an epilogue if we can't mask
4977   // the interleave-group.
4978   if (!useMaskedInterleavedAccesses(TTI))
4979     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
4980 
4981   unsigned MaxVF = computeFeasibleMaxVF(TC);
4982   if (TC > 0 && TC % MaxVF == 0) {
4983     // Accept MaxVF if we do not have a tail.
4984     LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
4985     return MaxVF;
4986   }
4987 
4988   // If we don't know the precise trip count, or if the trip count that we
4989   // found modulo the vectorization factor is not zero, try to fold the tail
4990   // by masking.
4991   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
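  // For example, a constant trip count of 17 with MaxVF = 8 leaves a
  // one-iteration tail (17 % 8 == 1), so we must either fold that tail by
  // masking or give up when a scalar epilogue is not allowed.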
4992   if (Legal->prepareToFoldTailByMasking()) {
4993     FoldTailByMasking = true;
4994     return MaxVF;
4995   }
4996 
4997   if (TC == 0) {
4998     reportVectorizationFailure(
4999         "Unable to calculate the loop count due to complex control flow",
5000         "unable to calculate the loop count due to complex control flow",
5001         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5002     return None;
5003   }
5004 
5005   reportVectorizationFailure(
5006       "Cannot optimize for size and vectorize at the same time.",
5007       "cannot optimize for size and vectorize at the same time. "
5008       "Enable vectorization of this loop with '#pragma clang loop "
5009       "vectorize(enable)' when compiling with -Os/-Oz",
5010       "NoTailLoopWithOptForSize", ORE, TheLoop);
5011   return None;
5012 }
5013 
5014 unsigned
5015 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount) {
5016   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5017   unsigned SmallestType, WidestType;
5018   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5019   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
5020 
5021   // Get the maximum safe dependence distance in bits computed by LAA.
5022   // It is computed as MaxVF * sizeOf(type) * 8, where type is taken from
5023   // the memory access that is most restrictive (involved in the smallest
5024   // dependence distance).
5025   unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();
5026 
5027   WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);
5028 
5029   unsigned MaxVectorSize = WidestRegister / WidestType;
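  // For example, with a usable register width of 128 bits and a widest scalar
  // type of i32, MaxVectorSize is 128 / 32 = 4 lanes.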
5030 
5031   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5032                     << " / " << WidestType << " bits.\n");
5033   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5034                     << WidestRegister << " bits.\n");
5035 
5036   assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements"
5037                                  " into one vector!");
5038   if (MaxVectorSize == 0) {
5039     LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
5040     MaxVectorSize = 1;
5041     return MaxVectorSize;
5042   } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
5043              isPowerOf2_32(ConstTripCount)) {
5044     // We need to clamp the VF to be the ConstTripCount. There is no point in
5045     // choosing a higher viable VF as done in the loop below.
5046     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5047                       << ConstTripCount << "\n");
5048     MaxVectorSize = ConstTripCount;
5049     return MaxVectorSize;
5050   }
5051 
5052   unsigned MaxVF = MaxVectorSize;
5053   if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
5054       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5055     // Collect all viable vectorization factors larger than the default MaxVF
5056     // (i.e. MaxVectorSize).
5057     SmallVector<unsigned, 8> VFs;
5058     unsigned NewMaxVectorSize = WidestRegister / SmallestType;
5059     for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
5060       VFs.push_back(VS);
5061 
5062     // For each VF calculate its register usage.
5063     auto RUs = calculateRegisterUsage(VFs);
5064 
5065     // Select the largest VF which doesn't require more registers than existing
5066     // ones.
5067     for (int i = RUs.size() - 1; i >= 0; --i) {
5068       bool Selected = true;
5069       for (auto& pair : RUs[i].MaxLocalUsers) {
5070         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5071         if (pair.second > TargetNumRegisters)
5072           Selected = false;
5073       }
5074       if (Selected) {
5075         MaxVF = VFs[i];
5076         break;
5077       }
5078     }
5079     if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
5080       if (MaxVF < MinVF) {
5081         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5082                           << ") with target's minimum: " << MinVF << '\n');
5083         MaxVF = MinVF;
5084       }
5085     }
5086   }
5087   return MaxVF;
5088 }
5089 
5090 VectorizationFactor
5091 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
5092   float Cost = expectedCost(1).first;
5093   const float ScalarCost = Cost;
5094   unsigned Width = 1;
5095   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
5096 
5097   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5098   if (ForceVectorization && MaxVF > 1) {
5099     // Ignore scalar width, because the user explicitly wants vectorization.
5100     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5101     // evaluation.
5102     Cost = std::numeric_limits<float>::max();
5103   }
5104 
5105   for (unsigned i = 2; i <= MaxVF; i *= 2) {
5106     // Notice that the vector loop needs to be executed fewer times, so
5107     // we need to divide the cost of the vector loop by the width of
5108     // the vector elements.
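    // For example (illustrative numbers only): if expectedCost(4).first is 20,
    // the per-iteration cost of the VF = 4 loop is 20 / 4 = 5, which is then
    // compared against the best cost seen so far.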
5109     VectorizationCostTy C = expectedCost(i);
5110     float VectorCost = C.first / (float)i;
5111     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5112                       << " costs: " << (int)VectorCost << ".\n");
5113     if (!C.second && !ForceVectorization) {
5114       LLVM_DEBUG(
5115           dbgs() << "LV: Not considering vector loop of width " << i
5116                  << " because it will not generate any vector instructions.\n");
5117       continue;
5118     }
5119     if (VectorCost < Cost) {
5120       Cost = VectorCost;
5121       Width = i;
5122     }
5123   }
5124 
5125   if (!EnableCondStoresVectorization && NumPredStores) {
5126     reportVectorizationFailure("There are conditional stores.",
5127         "store that is conditionally executed prevents vectorization",
5128         "ConditionalStore", ORE, TheLoop);
5129     Width = 1;
5130     Cost = ScalarCost;
5131   }
5132 
5133   LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
5134              << "LV: Vectorization seems to be not beneficial, "
5135              << "but was forced by a user.\n");
5136   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5137   VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
5138   return Factor;
5139 }
5140 
5141 std::pair<unsigned, unsigned>
5142 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5143   unsigned MinWidth = -1U;
5144   unsigned MaxWidth = 8;
5145   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5146 
5147   // For each block.
5148   for (BasicBlock *BB : TheLoop->blocks()) {
5149     // For each instruction in the loop.
5150     for (Instruction &I : BB->instructionsWithoutDebug()) {
5151       Type *T = I.getType();
5152 
5153       // Skip ignored values.
5154       if (ValuesToIgnore.find(&I) != ValuesToIgnore.end())
5155         continue;
5156 
5157       // Only examine Loads, Stores and PHINodes.
5158       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
5159         continue;
5160 
5161       // Examine PHI nodes that are reduction variables. Update the type to
5162       // account for the recurrence type.
5163       if (auto *PN = dyn_cast<PHINode>(&I)) {
5164         if (!Legal->isReductionVariable(PN))
5165           continue;
5166         RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
5167         T = RdxDesc.getRecurrenceType();
5168       }
5169 
5170       // Examine the stored values.
5171       if (auto *ST = dyn_cast<StoreInst>(&I))
5172         T = ST->getValueOperand()->getType();
5173 
5174       // Ignore loaded pointer types and stored pointer types that are not
5175       // vectorizable.
5176       //
5177       // FIXME: The check here attempts to predict whether a load or store will
5178       //        be vectorized. We only know this for certain after a VF has
5179       //        been selected. Here, we assume that if an access can be
5180       //        vectorized, it will be. We should also look at extending this
5181       //        optimization to non-pointer types.
5182       //
5183       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
5184           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
5185         continue;
5186 
5187       MinWidth = std::min(MinWidth,
5188                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5189       MaxWidth = std::max(MaxWidth,
5190                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5191     }
5192   }
5193 
5194   return {MinWidth, MaxWidth};
5195 }
5196 
5197 unsigned LoopVectorizationCostModel::selectInterleaveCount(unsigned VF,
5198                                                            unsigned LoopCost) {
5199   // -- The interleave heuristics --
5200   // We interleave the loop in order to expose ILP and reduce the loop overhead.
5201   // There are many micro-architectural considerations that we can't predict
5202   // at this level. For example, frontend pressure (on decode or fetch) due to
5203   // code size, or the number and capabilities of the execution ports.
5204   //
5205   // We use the following heuristics to select the interleave count:
5206   // 1. If the code has reductions, then we interleave to break the cross
5207   // iteration dependency.
5208   // 2. If the loop is really small, then we interleave to reduce the loop
5209   // overhead.
5210   // 3. We don't interleave if we think that we will spill registers to memory
5211   // due to the increased register pressure.
5212 
5213   if (!isScalarEpilogueAllowed())
5214     return 1;
5215 
5216   // Do not interleave if a maximum safe dependence distance restricts the loop.
5217   if (Legal->getMaxSafeDepDistBytes() != -1U)
5218     return 1;
5219 
5220   // Do not interleave loops with a relatively small known or estimated trip
5221   // count.
5222   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
5223   if (BestKnownTC && *BestKnownTC < TinyTripCountInterleaveThreshold)
5224     return 1;
5225 
5226   RegisterUsage R = calculateRegisterUsage({VF})[0];
5227   // We divide by these values below, so assume that at least one instruction
5228   // uses at least one register in each class.
5229   for (auto& pair : R.MaxLocalUsers) {
5230     pair.second = std::max(pair.second, 1U);
5231   }
5232 
5233   // We calculate the interleave count using the following formula.
5234   // Subtract the number of loop invariants from the number of available
5235   // registers. These registers are used by all of the interleaved instances.
5236   // Next, divide the remaining registers by the number of registers that are
5237   // required by the loop, in order to estimate how many parallel instances
5238   // fit without causing spills. All of this is rounded down if necessary to be
5239   // a power of two. We want a power-of-two interleave count to simplify any
5240   // addressing operations and alignment considerations. We also want a
5241   // power-of-two interleave count to ensure that the induction variable of
5242   // the vector loop wraps to zero when the tail is folded by masking; this
5243   // currently happens when optimizing for size, where we returned 1 above.
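  // For example (illustrative numbers only): with 32 registers in a class, 2 of
  // them holding loop-invariant values, and a maximum local usage of 5, the
  // basic formula gives PowerOf2Floor((32 - 2) / 5) = 4 interleaved copies.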
5244   unsigned IC = UINT_MAX;
5245 
5246   for (auto& pair : R.MaxLocalUsers) {
5247     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5248     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
5249                       << " registers of "
5250                       << TTI.getRegisterClassName(pair.first) << " register class\n");
5251     if (VF == 1) {
5252       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5253         TargetNumRegisters = ForceTargetNumScalarRegs;
5254     } else {
5255       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5256         TargetNumRegisters = ForceTargetNumVectorRegs;
5257     }
5258     unsigned MaxLocalUsers = pair.second;
5259     unsigned LoopInvariantRegs = 0;
5260     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
5261       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
5262 
5263     unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
5264     // Don't count the induction variable as interleaved.
5265     if (EnableIndVarRegisterHeur) {
5266       TmpIC =
5267           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
5268                         std::max(1U, (MaxLocalUsers - 1)));
5269     }
5270 
5271     IC = std::min(IC, TmpIC);
5272   }
5273 
5274   // Clamp the interleave ranges to reasonable counts.
5275   unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
5276 
5277   // Check if the user has overridden the max.
5278   if (VF == 1) {
5279     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5280       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5281   } else {
5282     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5283       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5284   }
5285 
5286   // If the trip count is a known or estimated compile-time constant, limit the
5287   // interleave count to at most the trip count divided by VF.
5288   if (BestKnownTC) {
5289     MaxInterleaveCount = std::min(*BestKnownTC / VF, MaxInterleaveCount);
5290   }
5291 
5292   // If we did not calculate the cost for VF (because the user selected the VF)
5293   // then we calculate the cost of VF here.
5294   if (LoopCost == 0)
5295     LoopCost = expectedCost(VF).first;
5296 
5297   assert(LoopCost && "Non-zero loop cost expected");
5298 
5299   // Clamp the calculated IC to be between 1 and the max interleave count
5300   // that the target and trip count allow.
5301   if (IC > MaxInterleaveCount)
5302     IC = MaxInterleaveCount;
5303   else if (IC < 1)
5304     IC = 1;
5305 
5306   // Interleave if we vectorized this loop and there is a reduction that could
5307   // benefit from interleaving.
5308   if (VF > 1 && !Legal->getReductionVars().empty()) {
5309     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5310     return IC;
5311   }
5312 
5313   // Note that if we've already vectorized the loop we will have done the
5314   // runtime check and so interleaving won't require further checks.
5315   bool InterleavingRequiresRuntimePointerCheck =
5316       (VF == 1 && Legal->getRuntimePointerChecking()->Need);
5317 
5318   // We want to interleave small loops in order to reduce the loop overhead and
5319   // potentially expose ILP opportunities.
5320   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
5321   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
5322     // We assume that the cost overhead is 1 and we use the cost model
5323     // to estimate the cost of the loop and interleave until the cost of the
5324     // loop overhead is about 5% of the cost of the loop.
5325     unsigned SmallIC =
5326         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
5327 
5328     // Interleave until store/load ports (estimated by max interleave count) are
5329     // saturated.
5330     unsigned NumStores = Legal->getNumStores();
5331     unsigned NumLoads = Legal->getNumLoads();
5332     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5333     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5334 
5335     // If we have a scalar reduction (vector reductions are already dealt with
5336     // by this point), we can increase the critical path length if the loop
5337     // we're interleaving is inside another loop. Limit it, by default, to 2 so
5338     // that the critical path only gets increased by one reduction operation.
5339     if (!Legal->getReductionVars().empty() && TheLoop->getLoopDepth() > 1) {
5340       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5341       SmallIC = std::min(SmallIC, F);
5342       StoresIC = std::min(StoresIC, F);
5343       LoadsIC = std::min(LoadsIC, F);
5344     }
5345 
5346     if (EnableLoadStoreRuntimeInterleave &&
5347         std::max(StoresIC, LoadsIC) > SmallIC) {
5348       LLVM_DEBUG(
5349           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5350       return std::max(StoresIC, LoadsIC);
5351     }
5352 
5353     LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5354     return SmallIC;
5355   }
5356 
5357   // Interleave if this is a large loop (small loops are already dealt with by
5358   // this point) that could benefit from interleaving.
5359   bool HasReductions = !Legal->getReductionVars().empty();
5360   if (TTI.enableAggressiveInterleaving(HasReductions)) {
5361     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5362     return IC;
5363   }
5364 
5365   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
5366   return 1;
5367 }
5368 
5369 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5370 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
5371   // This function calculates the register usage by measuring the highest number
5372   // of values that are alive at a single location. Obviously, this is a very
5373   // rough estimation. We scan the loop in topological order and
5374   // assign a number to each instruction. We use RPO to ensure that defs are
5375   // met before their users. We assume that each instruction that has in-loop
5376   // users starts an interval. We record every time that an in-loop value is
5377   // used, so we have a list of the first and last occurrences of each
5378   // instruction. Next, we transpose this data structure into a multi map that
5379   // holds the list of intervals that *end* at a specific location. This multi
5380   // map allows us to perform a linear search. We scan the instructions linearly
5381   // and record each time that a new interval starts, by placing it in a set.
5382   // If we find this value in the multi-map then we remove it from the set.
5383   // The max register usage is the maximum size of the set.
5384   // We also search for instructions that are defined outside the loop, but are
5385   // used inside the loop. We need this number separately from the max-interval
5386   // usage number because when we unroll, loop-invariant values do not take
5387   // more registers.
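  // As a rough sketch: if %a is defined at index 0 and last used at index 3,
  // and %b is defined at index 1 and last used at index 2, both intervals are
  // open at index 2 and the usage for their register class peaks at 2.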
5388   LoopBlocksDFS DFS(TheLoop);
5389   DFS.perform(LI);
5390 
5391   RegisterUsage RU;
5392 
5393   // Each 'key' in the map opens a new interval. The values
5394   // of the map are the index of the 'last seen' usage of the
5395   // instruction that is the key.
5396   using IntervalMap = DenseMap<Instruction *, unsigned>;
5397 
5398   // Maps instruction to its index.
5399   SmallVector<Instruction *, 64> IdxToInstr;
5400   // Marks the end of each interval.
5401   IntervalMap EndPoint;
5402   // Saves the set of instructions that are used in the loop.
5403   SmallPtrSet<Instruction *, 8> Ends;
5404   // Saves the list of values that are used in the loop but are
5405   // defined outside the loop, such as arguments and constants.
5406   SmallPtrSet<Value *, 8> LoopInvariants;
5407 
5408   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
5409     for (Instruction &I : BB->instructionsWithoutDebug()) {
5410       IdxToInstr.push_back(&I);
5411 
5412       // Save the end location of each USE.
5413       for (Value *U : I.operands()) {
5414         auto *Instr = dyn_cast<Instruction>(U);
5415 
5416         // Ignore non-instruction values such as arguments, constants, etc.
5417         if (!Instr)
5418           continue;
5419 
5420         // If this instruction is outside the loop then record it and continue.
5421         if (!TheLoop->contains(Instr)) {
5422           LoopInvariants.insert(Instr);
5423           continue;
5424         }
5425 
5426         // Overwrite previous end points.
5427         EndPoint[Instr] = IdxToInstr.size();
5428         Ends.insert(Instr);
5429       }
5430     }
5431   }
5432 
5433   // Saves the list of intervals that end with the index in 'key'.
5434   using InstrList = SmallVector<Instruction *, 2>;
5435   DenseMap<unsigned, InstrList> TransposeEnds;
5436 
5437   // Transpose the EndPoints to a list of values that end at each index.
5438   for (auto &Interval : EndPoint)
5439     TransposeEnds[Interval.second].push_back(Interval.first);
5440 
5441   SmallPtrSet<Instruction *, 8> OpenIntervals;
5442 
5443   // Get the size of the widest register.
5444   unsigned MaxSafeDepDist = -1U;
5445   if (Legal->getMaxSafeDepDistBytes() != -1U)
5446     MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
5447   unsigned WidestRegister =
5448       std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
5449   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5450 
5451   SmallVector<RegisterUsage, 8> RUs(VFs.size());
5452   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
5453 
5454   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
5455 
5456   // A lambda that gets the register usage for the given type and VF.
5457   auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
5458     if (Ty->isTokenTy())
5459       return 0U;
5460     unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
5461     return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
5462   };
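  // For example, with a 128-bit widest register, an i32 value at VF = 8
  // occupies max(1, 8 * 32 / 128) = 2 registers.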
5463 
5464   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
5465     Instruction *I = IdxToInstr[i];
5466 
5467     // Remove all of the instructions that end at this location.
5468     InstrList &List = TransposeEnds[i];
5469     for (Instruction *ToRemove : List)
5470       OpenIntervals.erase(ToRemove);
5471 
5472     // Ignore instructions that are never used within the loop.
5473     if (Ends.find(I) == Ends.end())
5474       continue;
5475 
5476     // Skip ignored values.
5477     if (ValuesToIgnore.find(I) != ValuesToIgnore.end())
5478       continue;
5479 
5480     // For each VF find the maximum usage of registers.
5481     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
5482       // Count the number of live intervals.
5483       SmallMapVector<unsigned, unsigned, 4> RegUsage;
5484 
5485       if (VFs[j] == 1) {
5486         for (auto Inst : OpenIntervals) {
5487           unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
5488           if (RegUsage.find(ClassID) == RegUsage.end())
5489             RegUsage[ClassID] = 1;
5490           else
5491             RegUsage[ClassID] += 1;
5492         }
5493       } else {
5494         collectUniformsAndScalars(VFs[j]);
5495         for (auto Inst : OpenIntervals) {
5496           // Skip ignored values for VF > 1.
5497           if (VecValuesToIgnore.find(Inst) != VecValuesToIgnore.end())
5498             continue;
5499           if (isScalarAfterVectorization(Inst, VFs[j])) {
5500             unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
5501             if (RegUsage.find(ClassID) == RegUsage.end())
5502               RegUsage[ClassID] = 1;
5503             else
5504               RegUsage[ClassID] += 1;
5505           } else {
5506             unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
5507             if (RegUsage.find(ClassID) == RegUsage.end())
5508               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
5509             else
5510               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
5511           }
5512         }
5513       }
5514 
5515       for (auto& pair : RegUsage) {
5516         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
5517           MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
5518         else
5519           MaxUsages[j][pair.first] = pair.second;
5520       }
5521     }
5522 
5523     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
5524                       << OpenIntervals.size() << '\n');
5525 
5526     // Add the current instruction to the list of open intervals.
5527     OpenIntervals.insert(I);
5528   }
5529 
5530   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
5531     SmallMapVector<unsigned, unsigned, 4> Invariant;
5532 
5533     for (auto Inst : LoopInvariants) {
5534       unsigned Usage = VFs[i] == 1 ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
5535       unsigned ClassID = TTI.getRegisterClassForType(VFs[i] > 1, Inst->getType());
5536       if (Invariant.find(ClassID) == Invariant.end())
5537         Invariant[ClassID] = Usage;
5538       else
5539         Invariant[ClassID] += Usage;
5540     }
5541 
5542     LLVM_DEBUG({
5543       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
5544       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
5545              << " item\n";
5546       for (const auto &pair : MaxUsages[i]) {
5547         dbgs() << "LV(REG): RegisterClass: "
5548                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
5549                << " registers\n";
5550       }
5551       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
5552              << " item\n";
5553       for (const auto &pair : Invariant) {
5554         dbgs() << "LV(REG): RegisterClass: "
5555                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
5556                << " registers\n";
5557       }
5558     });
5559 
5560     RU.LoopInvariantRegs = Invariant;
5561     RU.MaxLocalUsers = MaxUsages[i];
5562     RUs[i] = RU;
5563   }
5564 
5565   return RUs;
5566 }
5567 
5568 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
5569   // TODO: Cost model for emulated masked load/store is completely
5570   // broken. This hack guides the cost model to use an artificially
5571   // high enough value to practically disable vectorization with such
5572   // operations, except where the previously deployed legality hack allowed
5573   // using very low cost values. This is to avoid regressions coming simply
5574   // from moving "masked load/store" check from legality to cost model.
5575   // Masked Load/Gather emulation was previously never allowed.
5576   // Limited number of Masked Store/Scatter emulation was allowed.
5577   assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
5578   return isa<LoadInst>(I) ||
5579          (isa<StoreInst>(I) &&
5580           NumPredStores > NumberOfStoresToPredicate);
5581 }
5582 
5583 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
5584   // If we aren't vectorizing the loop, or if we've already collected the
5585   // instructions to scalarize, there's nothing to do. Collection may already
5586   // have occurred if we have a user-selected VF and are now computing the
5587   // expected cost for interleaving.
5588   if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end())
5589     return;
5590 
5591   // Initialize a mapping for VF in InstsToScalarize. If we find that it's
5592   // not profitable to scalarize any instructions, the presence of VF in the
5593   // map will indicate that we've analyzed it already.
5594   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
5595 
5596   // Find all the instructions in the loop that are scalar with predication and
5597   // determine whether it would be better not to if-convert the blocks they are
5598   // in. If so, we also record the instructions to scalarize.
5599   for (BasicBlock *BB : TheLoop->blocks()) {
5600     if (!blockNeedsPredication(BB))
5601       continue;
5602     for (Instruction &I : *BB)
5603       if (isScalarWithPredication(&I)) {
5604         ScalarCostsTy ScalarCosts;
5605         // Do not apply the discount logic if the hacked cost is needed
5606         // for emulated masked memrefs.
5607         if (!useEmulatedMaskMemRefHack(&I) &&
5608             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
5609           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
5610         // Remember that BB will remain after vectorization.
5611         PredicatedBBsAfterVectorization.insert(BB);
5612       }
5613   }
5614 }
5615 
5616 int LoopVectorizationCostModel::computePredInstDiscount(
5617     Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
5618     unsigned VF) {
5619   assert(!isUniformAfterVectorization(PredInst, VF) &&
5620          "Instruction marked uniform-after-vectorization will be predicated");
5621 
5622   // Initialize the discount to zero, meaning that the scalar version and the
5623   // vector version cost the same.
5624   int Discount = 0;
5625 
5626   // Holds instructions to analyze. The instructions we visit are mapped in
5627   // ScalarCosts. Those instructions are the ones that would be scalarized if
5628   // we find that the scalar version costs less.
5629   SmallVector<Instruction *, 8> Worklist;
5630 
5631   // Returns true if the given instruction can be scalarized.
5632   auto canBeScalarized = [&](Instruction *I) -> bool {
5633     // We only attempt to scalarize instructions forming a single-use chain
5634     // from the original predicated block that would otherwise be vectorized.
5635     // Although not strictly necessary, we give up on instructions we know will
5636     // already be scalar to avoid traversing chains that are unlikely to be
5637     // beneficial.
5638     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
5639         isScalarAfterVectorization(I, VF))
5640       return false;
5641 
5642     // If the instruction is scalar with predication, it will be analyzed
5643     // separately. We ignore it within the context of PredInst.
5644     if (isScalarWithPredication(I))
5645       return false;
5646 
5647     // If any of the instruction's operands are uniform after vectorization,
5648     // the instruction cannot be scalarized. This prevents, for example, a
5649     // masked load from being scalarized.
5650     //
5651     // We assume we will only emit a value for lane zero of an instruction
5652     // marked uniform after vectorization, rather than VF identical values.
5653     // Thus, if we scalarize an instruction that uses a uniform, we would
5654     // create uses of values corresponding to the lanes we aren't emitting code
5655     // for. This behavior can be changed by allowing getScalarValue to clone
5656     // the lane zero values for uniforms rather than asserting.
5657     for (Use &U : I->operands())
5658       if (auto *J = dyn_cast<Instruction>(U.get()))
5659         if (isUniformAfterVectorization(J, VF))
5660           return false;
5661 
5662     // Otherwise, we can scalarize the instruction.
5663     return true;
5664   };
5665 
5666   // Compute the expected cost discount from scalarizing the entire expression
5667   // feeding the predicated instruction. We currently only consider expressions
5668   // that are single-use instruction chains.
5669   Worklist.push_back(PredInst);
5670   while (!Worklist.empty()) {
5671     Instruction *I = Worklist.pop_back_val();
5672 
5673     // If we've already analyzed the instruction, there's nothing to do.
5674     if (ScalarCosts.find(I) != ScalarCosts.end())
5675       continue;
5676 
5677     // Compute the cost of the vector instruction. Note that this cost already
5678     // includes the scalarization overhead of the predicated instruction.
5679     unsigned VectorCost = getInstructionCost(I, VF).first;
5680 
5681     // Compute the cost of the scalarized instruction. This cost is the cost of
5682     // the instruction as if it wasn't if-converted and instead remained in the
5683     // predicated block. We will scale this cost by block probability after
5684     // computing the scalarization overhead.
5685     unsigned ScalarCost = VF * getInstructionCost(I, 1).first;
5686 
5687     // Compute the scalarization overhead of needed insertelement instructions
5688     // and phi nodes.
5689     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
5690       ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF),
5691                                                  true, false);
5692       ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI);
5693     }
5694 
5695     // Compute the scalarization overhead of needed extractelement
5696     // instructions. For each of the instruction's operands, if the operand can
5697     // be scalarized, add it to the worklist; otherwise, account for the
5698     // overhead.
5699     for (Use &U : I->operands())
5700       if (auto *J = dyn_cast<Instruction>(U.get())) {
5701         assert(VectorType::isValidElementType(J->getType()) &&
5702                "Instruction has non-scalar type");
5703         if (canBeScalarized(J))
5704           Worklist.push_back(J);
5705         else if (needsExtract(J, VF))
5706           ScalarCost += TTI.getScalarizationOverhead(
5707                               ToVectorTy(J->getType(),VF), false, true);
5708       }
5709 
5710     // Scale the total scalar cost by block probability.
5711     ScalarCost /= getReciprocalPredBlockProb();
5712 
5713     // Compute the discount. A non-negative discount means the vector version
5714     // of the instruction costs more, and scalarizing would be beneficial.
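    // For example (illustrative numbers only): if VectorCost is 10 and the
    // probability-scaled ScalarCost is 6, this chain member contributes
    // 10 - 6 = 4 to the discount, making scalarization look profitable.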
5715     Discount += VectorCost - ScalarCost;
5716     ScalarCosts[I] = ScalarCost;
5717   }
5718 
5719   return Discount;
5720 }
5721 
5722 LoopVectorizationCostModel::VectorizationCostTy
5723 LoopVectorizationCostModel::expectedCost(unsigned VF) {
5724   VectorizationCostTy Cost;
5725 
5726   // For each block.
5727   for (BasicBlock *BB : TheLoop->blocks()) {
5728     VectorizationCostTy BlockCost;
5729 
5730     // For each instruction in the old loop.
5731     for (Instruction &I : BB->instructionsWithoutDebug()) {
5732       // Skip ignored values.
5733       if (ValuesToIgnore.find(&I) != ValuesToIgnore.end() ||
5734           (VF > 1 && VecValuesToIgnore.find(&I) != VecValuesToIgnore.end()))
5735         continue;
5736 
5737       VectorizationCostTy C = getInstructionCost(&I, VF);
5738 
5739       // Check if we should override the cost.
5740       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
5741         C.first = ForceTargetInstructionCost;
5742 
5743       BlockCost.first += C.first;
5744       BlockCost.second |= C.second;
5745       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
5746                         << " for VF " << VF << " For instruction: " << I
5747                         << '\n');
5748     }
5749 
5750     // If we are vectorizing a predicated block, it will have been
5751     // if-converted. This means that the block's instructions (aside from
5752     // stores and instructions that may divide by zero) will now be
5753     // unconditionally executed. For the scalar case, we may not always execute
5754     // the predicated block. Thus, scale the block's cost by the probability of
5755     // executing it.
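    // For example, if predicated blocks are assumed to execute half the time,
    // the reciprocal probability is 2 and a predicated block with scalar cost
    // 8 contributes only 4 to the overall scalar loop cost.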
5756     if (VF == 1 && blockNeedsPredication(BB))
5757       BlockCost.first /= getReciprocalPredBlockProb();
5758 
5759     Cost.first += BlockCost.first;
5760     Cost.second |= BlockCost.second;
5761   }
5762 
5763   return Cost;
5764 }
5765 
5766 /// Gets Address Access SCEV after verifying that the access pattern
5767 /// is loop invariant except for the induction variable dependence.
5768 ///
5769 /// This SCEV can be sent to the Target in order to estimate the address
5770 /// calculation cost.
5771 static const SCEV *getAddressAccessSCEV(
5772               Value *Ptr,
5773               LoopVectorizationLegality *Legal,
5774               PredicatedScalarEvolution &PSE,
5775               const Loop *TheLoop) {
5776 
5777   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
5778   if (!Gep)
5779     return nullptr;
5780 
5781   // We are looking for a gep with all loop invariant indices except for one
5782   // which should be an induction variable.
5783   auto SE = PSE.getSE();
5784   unsigned NumOperands = Gep->getNumOperands();
5785   for (unsigned i = 1; i < NumOperands; ++i) {
5786     Value *Opd = Gep->getOperand(i);
5787     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
5788         !Legal->isInductionVariable(Opd))
5789       return nullptr;
5790   }
5791 
5792   // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
5793   return PSE.getSCEV(Ptr);
5794 }
5795 
5796 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
5797   return Legal->hasStride(I->getOperand(0)) ||
5798          Legal->hasStride(I->getOperand(1));
5799 }
5800 
5801 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
5802                                                                  unsigned VF) {
5803   assert(VF > 1 && "Scalarization cost of instruction implies vectorization.");
5804   Type *ValTy = getMemInstValueType(I);
5805   auto SE = PSE.getSE();
5806 
5807   unsigned AS = getLoadStoreAddressSpace(I);
5808   Value *Ptr = getLoadStorePointerOperand(I);
5809   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
5810 
5811   // Figure out whether the access is strided and get the stride value
5812   // if it's known at compile time.
5813   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
5814 
5815   // Get the cost of the scalar memory instruction and address computation.
5816   unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
5817 
5818   // Don't pass *I here, since it is scalar but will actually be part of a
5819   // vectorized loop where the user of it is a vectorized instruction.
5820   const MaybeAlign Alignment = getLoadStoreAlignment(I);
5821   Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
5822                                    Alignment, AS);
5823 
5824   // Get the overhead of the extractelement and insertelement instructions
5825   // we might create due to scalarization.
5826   Cost += getScalarizationOverhead(I, VF);
5827 
5828   // If we have a predicated store, it may not be executed for each vector
5829   // lane. Scale the cost by the probability of executing the predicated
5830   // block.
5831   if (isPredicatedInst(I)) {
5832     Cost /= getReciprocalPredBlockProb();
5833 
5834     if (useEmulatedMaskMemRefHack(I))
5835       // Artificially setting to a high enough value to practically disable
5836       // vectorization with such operations.
5837       Cost = 3000000;
5838   }
5839 
5840   return Cost;
5841 }
5842 
5843 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
5844                                                              unsigned VF) {
5845   Type *ValTy = getMemInstValueType(I);
5846   Type *VectorTy = ToVectorTy(ValTy, VF);
5847   Value *Ptr = getLoadStorePointerOperand(I);
5848   unsigned AS = getLoadStoreAddressSpace(I);
5849   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
5850 
5851   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
5852          "Stride should be 1 or -1 for consecutive memory access");
5853   const MaybeAlign Alignment = getLoadStoreAlignment(I);
5854   unsigned Cost = 0;
5855   if (Legal->isMaskRequired(I))
5856     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy,
5857                                       Alignment ? Alignment->value() : 0, AS);
5858   else
5859     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I);
5860 
5861   bool Reverse = ConsecutiveStride < 0;
5862   if (Reverse)
5863     Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5864   return Cost;
5865 }
5866 
5867 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
5868                                                          unsigned VF) {
5869   Type *ValTy = getMemInstValueType(I);
5870   Type *VectorTy = ToVectorTy(ValTy, VF);
5871   const MaybeAlign Alignment = getLoadStoreAlignment(I);
5872   unsigned AS = getLoadStoreAddressSpace(I);
5873   if (isa<LoadInst>(I)) {
5874     return TTI.getAddressComputationCost(ValTy) +
5875            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) +
5876            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
5877   }
5878   StoreInst *SI = cast<StoreInst>(I);
5879 
5880   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
5881   return TTI.getAddressComputationCost(ValTy) +
5882          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS) +
5883          (isLoopInvariantStoreValue
5884               ? 0
5885               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
5886                                        VF - 1));
5887 }
5888 
5889 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
5890                                                           unsigned VF) {
5891   Type *ValTy = getMemInstValueType(I);
5892   Type *VectorTy = ToVectorTy(ValTy, VF);
5893   const MaybeAlign Alignment = getLoadStoreAlignment(I);
5894   Value *Ptr = getLoadStorePointerOperand(I);
5895 
5896   return TTI.getAddressComputationCost(VectorTy) +
5897          TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
5898                                     Legal->isMaskRequired(I),
5899                                     Alignment ? Alignment->value() : 0, I);
5900 }
5901 
5902 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
5903                                                             unsigned VF) {
5904   Type *ValTy = getMemInstValueType(I);
5905   Type *VectorTy = ToVectorTy(ValTy, VF);
5906   unsigned AS = getLoadStoreAddressSpace(I);
5907 
5908   auto Group = getInterleavedAccessGroup(I);
5909   assert(Group && "Fail to get an interleaved access group.");
5910 
5911   unsigned InterleaveFactor = Group->getFactor();
5912   Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
5913 
5914   // Holds the indices of existing members in an interleaved load group.
5915   // An interleaved store group doesn't need this as it doesn't allow gaps.
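  // For example, a factor-3 load group where only members 0 and 2 exist yields
  // Indices = {0, 2}, letting the target account for the gap at index 1.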
5916   SmallVector<unsigned, 4> Indices;
5917   if (isa<LoadInst>(I)) {
5918     for (unsigned i = 0; i < InterleaveFactor; i++)
5919       if (Group->getMember(i))
5920         Indices.push_back(i);
5921   }
5922 
5923   // Calculate the cost of the whole interleaved group.
5924   bool UseMaskForGaps =
5925       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
5926   unsigned Cost = TTI.getInterleavedMemoryOpCost(
5927       I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
5928       Group->getAlign().value(), AS, Legal->isMaskRequired(I), UseMaskForGaps);
5929 
5930   if (Group->isReverse()) {
5931     // TODO: Add support for reversed masked interleaved access.
5932     assert(!Legal->isMaskRequired(I) &&
5933            "Reverse masked interleaved access not supported.");
5934     Cost += Group->getNumMembers() *
5935             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5936   }
5937   return Cost;
5938 }
5939 
5940 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
5941                                                               unsigned VF) {
5942   // Calculate scalar cost only. Vectorization cost should be ready at this
5943   // moment.
5944   if (VF == 1) {
5945     Type *ValTy = getMemInstValueType(I);
5946     const MaybeAlign Alignment = getLoadStoreAlignment(I);
5947     unsigned AS = getLoadStoreAddressSpace(I);
5948 
5949     return TTI.getAddressComputationCost(ValTy) +
5950            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I);
5951   }
5952   return getWideningCost(I, VF);
5953 }
5954 
5955 LoopVectorizationCostModel::VectorizationCostTy
5956 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
5957   // If we know that this instruction will remain uniform, check the cost of
5958   // the scalar version.
5959   if (isUniformAfterVectorization(I, VF))
5960     VF = 1;
5961 
5962   if (VF > 1 && isProfitableToScalarize(I, VF))
5963     return VectorizationCostTy(InstsToScalarize[VF][I], false);
5964 
5965   // Forced scalars do not have any scalarization overhead.
5966   auto ForcedScalar = ForcedScalars.find(VF);
5967   if (VF > 1 && ForcedScalar != ForcedScalars.end()) {
5968     auto InstSet = ForcedScalar->second;
5969     if (InstSet.find(I) != InstSet.end())
5970       return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false);
5971   }
5972 
5973   Type *VectorTy;
5974   unsigned C = getInstructionCost(I, VF, VectorTy);
5975 
5976   bool TypeNotScalarized =
5977       VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF;
5978   return VectorizationCostTy(C, TypeNotScalarized);
5979 }
5980 
5981 unsigned LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
5982                                                               unsigned VF) {
5983 
5984   if (VF == 1)
5985     return 0;
5986 
5987   unsigned Cost = 0;
5988   Type *RetTy = ToVectorTy(I->getType(), VF);
5989   if (!RetTy->isVoidTy() &&
5990       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
5991     Cost += TTI.getScalarizationOverhead(RetTy, true, false);
5992 
5993   // Some targets keep addresses scalar.
5994   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
5995     return Cost;
5996 
5997   // Some targets support efficient element stores.
5998   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
5999     return Cost;
6000 
6001   // Collect operands to consider.
6002   CallInst *CI = dyn_cast<CallInst>(I);
6003   Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands();
6004 
6005   // Skip operands that do not require extraction/scalarization and do not incur
6006   // any overhead.
6007   return Cost + TTI.getOperandsScalarizationOverhead(
6008                     filterExtractingOperands(Ops, VF), VF);
6009 }
6010 
6011 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
6012   if (VF == 1)
6013     return;
6014   NumPredStores = 0;
6015   for (BasicBlock *BB : TheLoop->blocks()) {
6016     // For each instruction in the old loop.
6017     for (Instruction &I : *BB) {
6018       Value *Ptr = getLoadStorePointerOperand(&I);
6019       if (!Ptr)
6020         continue;
6021 
6022       // TODO: We should generate better code and update the cost model for
6023       // predicated uniform stores. Today they are treated as any other
6024       // predicated store (see added test cases in
6025       // invariant-store-vectorization.ll).
6026       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
6027         NumPredStores++;
6028 
6029       if (Legal->isUniform(Ptr) &&
6030           // Conditional loads and stores should be scalarized and predicated.
6031           // isScalarWithPredication cannot be used here since masked
6032           // gather/scatters are not considered scalar with predication.
6033           !Legal->blockNeedsPredication(I.getParent())) {
6034         // TODO: Avoid replicating loads and stores instead of
6035         // relying on instcombine to remove them.
6036         // Load: Scalar load + broadcast
6037         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
6038         unsigned Cost = getUniformMemOpCost(&I, VF);
6039         setWideningDecision(&I, VF, CM_Scalarize, Cost);
6040         continue;
6041       }
6042 
6043       // We assume that widening is the best solution when possible.
6044       if (memoryInstructionCanBeWidened(&I, VF)) {
6045         unsigned Cost = getConsecutiveMemOpCost(&I, VF);
6046         int ConsecutiveStride =
6047                Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
6048         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6049                "Expected consecutive stride.");
6050         InstWidening Decision =
6051             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
6052         setWideningDecision(&I, VF, Decision, Cost);
6053         continue;
6054       }
6055 
6056       // Choose between Interleaving, Gather/Scatter or Scalarization.
6057       unsigned InterleaveCost = std::numeric_limits<unsigned>::max();
6058       unsigned NumAccesses = 1;
6059       if (isAccessInterleaved(&I)) {
6060         auto Group = getInterleavedAccessGroup(&I);
6061         assert(Group && "Fail to get an interleaved access group.");
6062 
6063         // Make one decision for the whole group.
6064         if (getWideningDecision(&I, VF) != CM_Unknown)
6065           continue;
6066 
6067         NumAccesses = Group->getNumMembers();
6068         if (interleavedAccessCanBeWidened(&I, VF))
6069           InterleaveCost = getInterleaveGroupCost(&I, VF);
6070       }
6071 
6072       unsigned GatherScatterCost =
6073           isLegalGatherOrScatter(&I)
6074               ? getGatherScatterCost(&I, VF) * NumAccesses
6075               : std::numeric_limits<unsigned>::max();
6076 
6077       unsigned ScalarizationCost =
6078           getMemInstScalarizationCost(&I, VF) * NumAccesses;
6079 
6080       // Choose better solution for the current VF,
6081       // write down this decision and use it during vectorization.
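      // Ties between interleaving and gather/scatter are broken in favor of
      // interleaving; ties against scalarization are broken in favor of
      // scalarization.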
6082       unsigned Cost;
6083       InstWidening Decision;
6084       if (InterleaveCost <= GatherScatterCost &&
6085           InterleaveCost < ScalarizationCost) {
6086         Decision = CM_Interleave;
6087         Cost = InterleaveCost;
6088       } else if (GatherScatterCost < ScalarizationCost) {
6089         Decision = CM_GatherScatter;
6090         Cost = GatherScatterCost;
6091       } else {
6092         Decision = CM_Scalarize;
6093         Cost = ScalarizationCost;
6094       }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The cost is computed for the group as a
      // whole but is attributed to a single member instruction.
6098       if (auto Group = getInterleavedAccessGroup(&I))
6099         setWideningDecision(Group, VF, Decision, Cost);
6100       else
6101         setWideningDecision(&I, VF, Decision, Cost);
6102     }
6103   }
6104 
6105   // Make sure that any load of address and any other address computation
6106   // remains scalar unless there is gather/scatter support. This avoids
6107   // inevitable extracts into address registers, and also has the benefit of
6108   // activating LSR more, since that pass can't optimize vectorized
6109   // addresses.
6110   if (TTI.prefersVectorizedAddressing())
6111     return;
6112 
6113   // Start with all scalar pointer uses.
6114   SmallPtrSet<Instruction *, 8> AddrDefs;
6115   for (BasicBlock *BB : TheLoop->blocks())
6116     for (Instruction &I : *BB) {
6117       Instruction *PtrDef =
6118         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
6119       if (PtrDef && TheLoop->contains(PtrDef) &&
6120           getWideningDecision(&I, VF) != CM_GatherScatter)
6121         AddrDefs.insert(PtrDef);
6122     }
6123 
6124   // Add all instructions used to generate the addresses.
6125   SmallVector<Instruction *, 4> Worklist;
6126   for (auto *I : AddrDefs)
6127     Worklist.push_back(I);
6128   while (!Worklist.empty()) {
6129     Instruction *I = Worklist.pop_back_val();
6130     for (auto &Op : I->operands())
6131       if (auto *InstOp = dyn_cast<Instruction>(Op))
6132         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
6133             AddrDefs.insert(InstOp).second)
6134           Worklist.push_back(InstOp);
6135   }
6136 
6137   for (auto *I : AddrDefs) {
6138     if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // the cost functions, but since that requires knowing whether the loaded
      // register is involved in an address computation, the decision is
      // instead overridden here once that information is available.
6143       InstWidening Decision = getWideningDecision(I, VF);
6144       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
6145         // Scalarize a widened load of address.
6146         setWideningDecision(I, VF, CM_Scalarize,
6147                             (VF * getMemoryInstructionCost(I, 1)));
6148       else if (auto Group = getInterleavedAccessGroup(I)) {
6149         // Scalarize an interleave group of address loads.
6150         for (unsigned I = 0; I < Group->getFactor(); ++I) {
6151           if (Instruction *Member = Group->getMember(I))
6152             setWideningDecision(Member, VF, CM_Scalarize,
6153                                 (VF * getMemoryInstructionCost(Member, 1)));
6154         }
6155       }
6156     } else
      // Make sure I gets scalarized and is given a cost estimate without
      // scalarization overhead.
6159       ForcedScalars[VF].insert(I);
6160   }
6161 }
6162 
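// Return the estimated cost of executing instruction I when vectorizing with
// factor VF, and set VectorTy to the vector type used for the estimate.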
6163 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
6164                                                         unsigned VF,
6165                                                         Type *&VectorTy) {
6166   Type *RetTy = I->getType();
6167   if (canTruncateToMinimalBitwidth(I, VF))
6168     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6169   VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
6170   auto SE = PSE.getSE();
6171 
6172   // TODO: We need to estimate the cost of intrinsic calls.
6173   switch (I->getOpcode()) {
6174   case Instruction::GetElementPtr:
6175     // We mark this instruction as zero-cost because the cost of GEPs in
6176     // vectorized code depends on whether the corresponding memory instruction
6177     // is scalarized or not. Therefore, we handle GEPs with the memory
6178     // instruction cost.
6179     return 0;
6180   case Instruction::Br: {
    // For scalarized and predicated instructions, there will be VF predicated
    // blocks in the vectorized loop. Each branch around these blocks also
    // requires an extract of its vector compare i1 element.
6184     bool ScalarPredicatedBB = false;
6185     BranchInst *BI = cast<BranchInst>(I);
6186     if (VF > 1 && BI->isConditional() &&
6187         (PredicatedBBsAfterVectorization.find(BI->getSuccessor(0)) !=
6188              PredicatedBBsAfterVectorization.end() ||
6189          PredicatedBBsAfterVectorization.find(BI->getSuccessor(1)) !=
6190              PredicatedBBsAfterVectorization.end()))
6191       ScalarPredicatedBB = true;
6192 
6193     if (ScalarPredicatedBB) {
6194       // Return cost for branches around scalarized and predicated blocks.
6195       Type *Vec_i1Ty =
6196           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
6197       return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) +
6198               (TTI.getCFInstrCost(Instruction::Br) * VF));
6199     } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1)
6200       // The back-edge branch will remain, as will all scalar branches.
6201       return TTI.getCFInstrCost(Instruction::Br);
6202     else
6203       // This branch will be eliminated by if-conversion.
6204       return 0;
6205     // Note: We currently assume zero cost for an unconditional branch inside
6206     // a predicated block since it will become a fall-through, although we
6207     // may decide in the future to call TTI for all branches.
6208   }
6209   case Instruction::PHI: {
6210     auto *Phi = cast<PHINode>(I);
6211 
6212     // First-order recurrences are replaced by vector shuffles inside the loop.
6213     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
6214     if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
6215       return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
6216                                 VectorTy, VF - 1, VectorType::get(RetTy, 1));
6217 
6218     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
6219     // converted into select instructions. We require N - 1 selects per phi
6220     // node, where N is the number of incoming values.
6221     if (VF > 1 && Phi->getParent() != TheLoop->getHeader())
6222       return (Phi->getNumIncomingValues() - 1) *
6223              TTI.getCmpSelInstrCost(
6224                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
6225                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF));
6226 
6227     return TTI.getCFInstrCost(Instruction::PHI);
6228   }
6229   case Instruction::UDiv:
6230   case Instruction::SDiv:
6231   case Instruction::URem:
6232   case Instruction::SRem:
6233     // If we have a predicated instruction, it may not be executed for each
6234     // vector lane. Get the scalarization cost and scale this amount by the
6235     // probability of executing the predicated block. If the instruction is not
6236     // predicated, we fall through to the next case.
6237     if (VF > 1 && isScalarWithPredication(I)) {
6238       unsigned Cost = 0;
6239 
6240       // These instructions have a non-void type, so account for the phi nodes
6241       // that we will create. This cost is likely to be zero. The phi node
6242       // cost, if any, should be scaled by the block probability because it
6243       // models a copy at the end of each predicated block.
6244       Cost += VF * TTI.getCFInstrCost(Instruction::PHI);
6245 
6246       // The cost of the non-predicated instruction.
6247       Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);
6248 
6249       // The cost of insertelement and extractelement instructions needed for
6250       // scalarization.
6251       Cost += getScalarizationOverhead(I, VF);
6252 
6253       // Scale the cost by the probability of executing the predicated blocks.
6254       // This assumes the predicated block for each vector lane is equally
6255       // likely.
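      // E.g. if getReciprocalPredBlockProb() is 2 (a 50% block probability),
      // the predicated cost above is halved.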
6256       return Cost / getReciprocalPredBlockProb();
6257     }
6258     LLVM_FALLTHROUGH;
6259   case Instruction::Add:
6260   case Instruction::FAdd:
6261   case Instruction::Sub:
6262   case Instruction::FSub:
6263   case Instruction::Mul:
6264   case Instruction::FMul:
6265   case Instruction::FDiv:
6266   case Instruction::FRem:
6267   case Instruction::Shl:
6268   case Instruction::LShr:
6269   case Instruction::AShr:
6270   case Instruction::And:
6271   case Instruction::Or:
6272   case Instruction::Xor: {
6273     // Since we will replace the stride by 1 the multiplication should go away.
6274     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
6275       return 0;
6276     // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
6278     Value *Op2 = I->getOperand(1);
6279     TargetTransformInfo::OperandValueProperties Op2VP;
6280     TargetTransformInfo::OperandValueKind Op2VK =
6281         TTI.getOperandInfo(Op2, Op2VP);
6282     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
6283       Op2VK = TargetTransformInfo::OK_UniformValue;
6284 
6285     SmallVector<const Value *, 4> Operands(I->operand_values());
6286     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6287     return N * TTI.getArithmeticInstrCost(
6288                    I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue,
6289                    Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
6290   }
6291   case Instruction::FNeg: {
6292     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6293     return N * TTI.getArithmeticInstrCost(
6294                    I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue,
6295                    TargetTransformInfo::OK_AnyValue,
6296                    TargetTransformInfo::OP_None, TargetTransformInfo::OP_None,
6297                    I->getOperand(0), I);
6298   }
6299   case Instruction::Select: {
6300     SelectInst *SI = cast<SelectInst>(I);
6301     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
6302     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
6303     Type *CondTy = SI->getCondition()->getType();
6304     if (!ScalarCond)
6305       CondTy = VectorType::get(CondTy, VF);
6306 
6307     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I);
6308   }
6309   case Instruction::ICmp:
6310   case Instruction::FCmp: {
6311     Type *ValTy = I->getOperand(0)->getType();
6312     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
6313     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
6314       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
6315     VectorTy = ToVectorTy(ValTy, VF);
6316     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I);
6317   }
6318   case Instruction::Store:
6319   case Instruction::Load: {
6320     unsigned Width = VF;
6321     if (Width > 1) {
6322       InstWidening Decision = getWideningDecision(I, Width);
6323       assert(Decision != CM_Unknown &&
6324              "CM decision should be taken at this point");
6325       if (Decision == CM_Scalarize)
6326         Width = 1;
6327     }
6328     VectorTy = ToVectorTy(getMemInstValueType(I), Width);
6329     return getMemoryInstructionCost(I, VF);
6330   }
6331   case Instruction::ZExt:
6332   case Instruction::SExt:
6333   case Instruction::FPToUI:
6334   case Instruction::FPToSI:
6335   case Instruction::FPExt:
6336   case Instruction::PtrToInt:
6337   case Instruction::IntToPtr:
6338   case Instruction::SIToFP:
6339   case Instruction::UIToFP:
6340   case Instruction::Trunc:
6341   case Instruction::FPTrunc:
6342   case Instruction::BitCast: {
6343     // We optimize the truncation of induction variables having constant
6344     // integer steps. The cost of these truncations is the same as the scalar
6345     // operation.
6346     if (isOptimizableIVTruncate(I, VF)) {
6347       auto *Trunc = cast<TruncInst>(I);
6348       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
6349                                   Trunc->getSrcTy(), Trunc);
6350     }
6351 
6352     Type *SrcScalarTy = I->getOperand(0)->getType();
6353     Type *SrcVecTy =
6354         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
6355     if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or turn it
      // into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
6359       //
6360       // Calculate the modified src and dest types.
6361       Type *MinVecTy = VectorTy;
6362       if (I->getOpcode() == Instruction::Trunc) {
6363         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
6364         VectorTy =
6365             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6366       } else if (I->getOpcode() == Instruction::ZExt ||
6367                  I->getOpcode() == Instruction::SExt) {
6368         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
6369         VectorTy =
6370             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6371       }
6372     }
6373 
6374     unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
6375     return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I);
6376   }
6377   case Instruction::Call: {
6378     bool NeedToScalarize;
6379     CallInst *CI = cast<CallInst>(I);
6380     unsigned CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
6381     if (getVectorIntrinsicIDForCall(CI, TLI))
6382       return std::min(CallCost, getVectorIntrinsicCost(CI, VF));
6383     return CallCost;
6384   }
6385   default:
6386     // The cost of executing VF copies of the scalar instruction. This opcode
6387     // is unknown. Assume that it is the same as 'mul'.
6388     return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) +
6389            getScalarizationOverhead(I, VF);
6390   } // end of switch.
6391 }
6392 
6393 char LoopVectorize::ID = 0;
6394 
6395 static const char lv_name[] = "Loop Vectorization";
6396 
6397 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
6398 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
6399 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
6400 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
6401 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
6402 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
6403 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
6404 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
6405 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
6406 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
6407 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
6408 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
6409 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
6410 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
6411 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
6412 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
6413 
6414 namespace llvm {
6415 
6416 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
6417 
6418 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
6419                               bool VectorizeOnlyWhenForced) {
6420   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
6421 }
6422 
6423 } // end namespace llvm
6424 
6425 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
6426   // Check if the pointer operand of a load or store instruction is
6427   // consecutive.
6428   if (auto *Ptr = getLoadStorePointerOperand(Inst))
6429     return Legal->isConsecutivePtr(Ptr);
6430   return false;
6431 }
6432 
6433 void LoopVectorizationCostModel::collectValuesToIgnore() {
6434   // Ignore ephemeral values.
6435   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
6436 
6437   // Ignore type-promoting instructions we identified during reduction
6438   // detection.
6439   for (auto &Reduction : Legal->getReductionVars()) {
6440     RecurrenceDescriptor &RedDes = Reduction.second;
6441     SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
6442     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6443   }
6444   // Ignore type-casting instructions we identified during induction
6445   // detection.
6446   for (auto &Induction : Legal->getInductionVars()) {
6447     InductionDescriptor &IndDes = Induction.second;
6448     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6449     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
6450   }
6451 }
6452 
6453 // TODO: we could return a pair of values that specify the max VF and
6454 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do this yet because VPlan currently lacks
// a cost model that can choose which plan to execute if more than one is
// generated.
6458 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
6459                                  LoopVectorizationCostModel &CM) {
6460   unsigned WidestType;
6461   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
6462   return WidestVectorRegBits / WidestType;
6463 }
6464 
6465 VectorizationFactor
6466 LoopVectorizationPlanner::planInVPlanNativePath(unsigned UserVF) {
6467   unsigned VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before we can even evaluate whether vectorization is
  // profitable.
6470   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6471   // the vectorization pipeline.
6472   if (!OrigLoop->empty()) {
6473     // If the user doesn't provide a vectorization factor, determine a
6474     // reasonable one.
6475     if (!UserVF) {
6476       VF = determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM);
6477       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
6478 
6479       // Make sure we have a VF > 1 for stress testing.
6480       if (VPlanBuildStressTest && VF < 2) {
6481         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
6482                           << "overriding computed VF.\n");
6483         VF = 4;
6484       }
6485     }
6486     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6487     assert(isPowerOf2_32(VF) && "VF needs to be a power of two");
6488     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVF ? "user " : "") << "VF " << VF
6489                       << " to build VPlans.\n");
6490     buildVPlans(VF, VF);
6491 
6492     // For VPlan build stress testing, we bail out after VPlan construction.
6493     if (VPlanBuildStressTest)
6494       return VectorizationFactor::Disabled();
6495 
6496     return {VF, 0};
6497   }
6498 
6499   LLVM_DEBUG(
6500       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6501                 "VPlan-native path.\n");
6502   return VectorizationFactor::Disabled();
6503 }
6504 
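// Plan vectorization of the innermost loop: compute the maximum feasible VF,
// build VPlans for the candidate VFs (or just the user-specified one), and
// select the most profitable vectorization factor.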
6505 Optional<VectorizationFactor> LoopVectorizationPlanner::plan(unsigned UserVF) {
6506   assert(OrigLoop->empty() && "Inner loop expected.");
6507   Optional<unsigned> MaybeMaxVF = CM.computeMaxVF();
  if (!MaybeMaxVF) // Cases that should not be vectorized nor interleaved.
6509     return None;
6510 
6511   // Invalidate interleave groups if all blocks of loop will be predicated.
6512   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
6513       !useMaskedInterleavedAccesses(*TTI)) {
6514     LLVM_DEBUG(
6515         dbgs()
6516         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6517            "which requires masked-interleaved support.\n");
6518     CM.InterleaveInfo.reset();
6519   }
6520 
6521   if (UserVF) {
6522     LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6523     assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
6524     // Collect the instructions (and their associated costs) that will be more
6525     // profitable to scalarize.
6526     CM.selectUserVectorizationFactor(UserVF);
6527     buildVPlansWithVPRecipes(UserVF, UserVF);
6528     LLVM_DEBUG(printPlans(dbgs()));
6529     return {{UserVF, 0}};
6530   }
6531 
6532   unsigned MaxVF = MaybeMaxVF.getValue();
6533   assert(MaxVF != 0 && "MaxVF is zero.");
6534 
6535   for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
6536     // Collect Uniform and Scalar instructions after vectorization with VF.
6537     CM.collectUniformsAndScalars(VF);
6538 
6539     // Collect the instructions (and their associated costs) that will be more
6540     // profitable to scalarize.
6541     if (VF > 1)
6542       CM.collectInstsToScalarize(VF);
6543   }
6544 
6545   buildVPlansWithVPRecipes(1, MaxVF);
6546   LLVM_DEBUG(printPlans(dbgs()));
6547   if (MaxVF == 1)
6548     return VectorizationFactor::Disabled();
6549 
6550   // Select the optimal vectorization factor.
6551   return CM.selectVectorizationFactor(MaxVF);
6552 }
6553 
6554 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
6555   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
6556                     << '\n');
6557   BestVF = VF;
6558   BestUF = UF;
6559 
6560   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
6561     return !Plan->hasVF(VF);
6562   });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
6564 }
6565 
6566 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
6567                                            DominatorTree *DT) {
6568   // Perform the actual loop transformation.
6569 
6570   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
6571   VPCallbackILV CallbackILV(ILV);
6572 
6573   VPTransformState State{BestVF, BestUF,      LI,
6574                          DT,     ILV.Builder, ILV.VectorLoopValueMap,
6575                          &ILV,   CallbackILV};
6576   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
6577   State.TripCount = ILV.getOrCreateTripCount(nullptr);
6578   State.CanonicalIV = ILV.Induction;
6579 
6580   //===------------------------------------------------===//
6581   //
  // Notice: any optimization or new instruction that goes
6583   // into the code below should also be implemented in
6584   // the cost-model.
6585   //
6586   //===------------------------------------------------===//
6587 
6588   // 2. Copy and widen instructions from the old loop into the new loop.
6589   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
6590   VPlans.front()->execute(&State);
6591 
6592   // 3. Fix the vectorized code: take care of header phi's, live-outs,
6593   //    predication, updating analyses.
6594   ILV.fixVectorizedLoop();
6595 }
6596 
6597 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
6598     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
6599   BasicBlock *Latch = OrigLoop->getLoopLatch();
6600 
6601   // We create new control-flow for the vectorized loop, so the original
6602   // condition will be dead after vectorization if it's only used by the
6603   // branch.
6604   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
6605   if (Cmp && Cmp->hasOneUse())
6606     DeadInstructions.insert(Cmp);
6607 
6608   // We create new "steps" for induction variable updates to which the original
6609   // induction variables map. An original update instruction will be dead if
6610   // all its users except the induction variable are dead.
6611   for (auto &Induction : Legal->getInductionVars()) {
6612     PHINode *Ind = Induction.first;
6613     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
6614     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
6615           return U == Ind || DeadInstructions.find(cast<Instruction>(U)) !=
6616                                  DeadInstructions.end();
6617         }))
6618       DeadInstructions.insert(IndUpdate);
6619 
    // We also record as "Dead" the type-casting instructions we had identified
6621     // during induction analysis. We don't need any handling for them in the
6622     // vectorized loop because we have proven that, under a proper runtime
6623     // test guarding the vectorized loop, the value of the phi, and the casted
6624     // value of the phi, are the same. The last instruction in this casting chain
6625     // will get its scalar/vector/widened def from the scalar/vector/widened def
6626     // of the respective phi node. Any other casts in the induction def-use chain
6627     // have no other uses outside the phi update chain, and will be ignored.
6628     InductionDescriptor &IndDes = Induction.second;
6629     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
6630     DeadInstructions.insert(Casts.begin(), Casts.end());
6631   }
6632 }
6633 
6634 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
6635 
6636 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
6637 
6638 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
6639                                         Instruction::BinaryOps BinOp) {
6640   // When unrolling and the VF is 1, we only need to add a simple scalar.
6641   Type *Ty = Val->getType();
6642   assert(!Ty->isVectorTy() && "Val must be a scalar");
6643 
6644   if (Ty->isFloatingPointTy()) {
6645     Constant *C = ConstantFP::get(Ty, (double)StartIdx);
6646 
6647     // Floating point operations had to be 'fast' to enable the unrolling.
6648     Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
6649     return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
6650   }
6651   Constant *C = ConstantInt::get(Ty, StartIdx);
6652   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
6653 }
6654 
6655 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
6656   SmallVector<Metadata *, 4> MDs;
6657   // Reserve first location for self reference to the LoopID metadata node.
6658   MDs.push_back(nullptr);
6659   bool IsUnrollMetadata = false;
6660   MDNode *LoopID = L->getLoopID();
6661   if (LoopID) {
6662     // First find existing loop unrolling disable metadata.
6663     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
6664       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
6665       if (MD) {
6666         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata |=
            S && S->getString().startswith("llvm.loop.unroll.disable");
6669       }
6670       MDs.push_back(LoopID->getOperand(i));
6671     }
6672   }
6673 
6674   if (!IsUnrollMetadata) {
6675     // Add runtime unroll disable metadata.
6676     LLVMContext &Context = L->getHeader()->getContext();
6677     SmallVector<Metadata *, 1> DisableOperands;
6678     DisableOperands.push_back(
6679         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
6680     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
6681     MDs.push_back(DisableNode);
6682     MDNode *NewLoopID = MDNode::get(Context, MDs);
6683     // Set operand 0 to refer to the loop id itself.
6684     NewLoopID->replaceOperandWith(0, NewLoopID);
6685     L->setLoopID(NewLoopID);
6686   }
6687 }
6688 
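/// Test \p Predicate on \p Range. Return the value of the predicate at
/// Range.Start, clamping Range.End down to the first power-of-two multiple of
/// Range.Start at which the predicate's value changes, so that the returned
/// value holds for the entire resulting range.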
6689 bool LoopVectorizationPlanner::getDecisionAndClampRange(
6690     const std::function<bool(unsigned)> &Predicate, VFRange &Range) {
6691   assert(Range.End > Range.Start && "Trying to test an empty VF range.");
6692   bool PredicateAtRangeStart = Predicate(Range.Start);
6693 
6694   for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2)
6695     if (Predicate(TmpVF) != PredicateAtRangeStart) {
6696       Range.End = TmpVF;
6697       break;
6698     }
6699 
6700   return PredicateAtRangeStart;
6701 }
6702 
6703 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
6704 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
6705 /// of VF's starting at a given VF and extending it as much as possible. Each
6706 /// vectorization decision can potentially shorten this sub-range during
6707 /// buildVPlan().
6708 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) {
6709   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
6710     VFRange SubRange = {VF, MaxVF + 1};
6711     VPlans.push_back(buildVPlan(SubRange));
6712     VF = SubRange.End;
6713   }
6714 }
6715 
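// Compute and cache the mask for the edge from Src to Dst: the mask of Src's
// block AND'ed with the edge's condition (negated when Dst is the false
// successor). A nullptr mask denotes an all-one mask.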
6716 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
6717                                          VPlanPtr &Plan) {
6718   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
6719 
6720   // Look for cached value.
6721   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
6722   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
6723   if (ECEntryIt != EdgeMaskCache.end())
6724     return ECEntryIt->second;
6725 
6726   VPValue *SrcMask = createBlockInMask(Src, Plan);
6727 
6728   // The terminator has to be a branch inst!
6729   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
6730   assert(BI && "Unexpected terminator found");
6731 
6732   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
6733     return EdgeMaskCache[Edge] = SrcMask;
6734 
6735   VPValue *EdgeMask = Plan->getVPValue(BI->getCondition());
6736   assert(EdgeMask && "No Edge Mask found for condition");
6737 
6738   if (BI->getSuccessor(0) != Dst)
6739     EdgeMask = Builder.createNot(EdgeMask);
6740 
6741   if (SrcMask) // Otherwise block in-mask is all-one, no need to AND.
6742     EdgeMask = Builder.createAnd(EdgeMask, SrcMask);
6743 
6744   return EdgeMaskCache[Edge] = EdgeMask;
6745 }
6746 
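// Compute and cache the mask under which block BB executes: the OR of its
// incoming edge masks, or, for a header that needs predication (tail
// folding), the early-exit compare IV <= BTC. A nullptr mask denotes an
// all-one mask.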
6747 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
6748   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
6749 
6750   // Look for cached value.
6751   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
6752   if (BCEntryIt != BlockMaskCache.end())
6753     return BCEntryIt->second;
6754 
6755   // All-one mask is modelled as no-mask following the convention for masked
6756   // load/store/gather/scatter. Initialize BlockMask to no-mask.
6757   VPValue *BlockMask = nullptr;
6758 
6759   if (OrigLoop->getHeader() == BB) {
6760     if (!CM.blockNeedsPredication(BB))
6761       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
6762 
6763     // Introduce the early-exit compare IV <= BTC to form header block mask.
6764     // This is used instead of IV < TC because TC may wrap, unlike BTC.
6765     // Start by constructing the desired canonical IV.
6766     VPValue *IV = nullptr;
6767     if (Legal->getPrimaryInduction())
6768       IV = Plan->getVPValue(Legal->getPrimaryInduction());
6769     else {
6770       auto IVRecipe = new VPWidenCanonicalIVRecipe();
6771       Builder.getInsertBlock()->appendRecipe(IVRecipe);
6772       IV = IVRecipe->getVPValue();
6773     }
6774     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
6775     BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
6776     return BlockMaskCache[BB] = BlockMask;
6777   }
6778 
6779   // This is the block mask. We OR all incoming edges.
6780   for (auto *Predecessor : predecessors(BB)) {
6781     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
6782     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
6783       return BlockMaskCache[BB] = EdgeMask;
6784 
6785     if (!BlockMask) { // BlockMask has its initialized nullptr value.
6786       BlockMask = EdgeMask;
6787       continue;
6788     }
6789 
6790     BlockMask = Builder.createOr(BlockMask, EdgeMask);
6791   }
6792 
6793   return BlockMaskCache[BB] = BlockMask;
6794 }
6795 
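// Build a recipe widening a load or store, carrying its address and, when
// required, its mask, provided the cost model decided to widen (or
// interleave) the access for the entire (clamped) VF range.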
6796 VPWidenMemoryInstructionRecipe *
6797 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range,
6798                                   VPlanPtr &Plan) {
6799   if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
6800     return nullptr;
6801 
6802   auto willWiden = [&](unsigned VF) -> bool {
6803     if (VF == 1)
6804       return false;
6805     LoopVectorizationCostModel::InstWidening Decision =
6806         CM.getWideningDecision(I, VF);
6807     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
6808            "CM decision should be taken at this point.");
6809     if (Decision == LoopVectorizationCostModel::CM_Interleave)
6810       return true;
6811     if (CM.isScalarAfterVectorization(I, VF) ||
6812         CM.isProfitableToScalarize(I, VF))
6813       return false;
6814     return Decision != LoopVectorizationCostModel::CM_Scalarize;
6815   };
6816 
6817   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
6818     return nullptr;
6819 
6820   VPValue *Mask = nullptr;
6821   if (Legal->isMaskRequired(I))
6822     Mask = createBlockInMask(I->getParent(), Plan);
6823 
6824   VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I));
6825   if (LoadInst *Load = dyn_cast<LoadInst>(I))
6826     return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask);
6827 
6828   StoreInst *Store = cast<StoreInst>(I);
6829   VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand());
6830   return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask);
6831 }
6832 
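// Build a recipe widening an integer or floating-point induction phi, or a
// truncation of such a phi when the truncation can be folded into the
// induction itself.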
6833 VPWidenIntOrFpInductionRecipe *
6834 VPRecipeBuilder::tryToOptimizeInduction(Instruction *I, VFRange &Range) {
6835   if (PHINode *Phi = dyn_cast<PHINode>(I)) {
6836     // Check if this is an integer or fp induction. If so, build the recipe that
6837     // produces its scalar and vector values.
6838     InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
6839     if (II.getKind() == InductionDescriptor::IK_IntInduction ||
6840         II.getKind() == InductionDescriptor::IK_FpInduction)
6841       return new VPWidenIntOrFpInductionRecipe(Phi);
6842 
6843     return nullptr;
6844   }
6845 
6846   // Optimize the special case where the source is a constant integer
6847   // induction variable. Notice that we can only optimize the 'trunc' case
6848   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
6849   // (c) other casts depend on pointer size.
6850 
6851   // Determine whether \p K is a truncation based on an induction variable that
6852   // can be optimized.
6853   auto isOptimizableIVTruncate =
6854       [&](Instruction *K) -> std::function<bool(unsigned)> {
6855     return
6856         [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); };
6857   };
6858 
6859   if (isa<TruncInst>(I) && LoopVectorizationPlanner::getDecisionAndClampRange(
6860                                isOptimizableIVTruncate(I), Range))
6861     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
6862                                              cast<TruncInst>(I));
6863   return nullptr;
6864 }
6865 
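// Handle phis in non-header blocks: they become blends of their incoming
// values, guarded by the masks of the corresponding incoming edges.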
6866 VPBlendRecipe *VPRecipeBuilder::tryToBlend(Instruction *I, VPlanPtr &Plan) {
6867   PHINode *Phi = dyn_cast<PHINode>(I);
6868   if (!Phi || Phi->getParent() == OrigLoop->getHeader())
6869     return nullptr;
6870 
6871   // We know that all PHIs in non-header blocks are converted into selects, so
6872   // we don't have to worry about the insertion order and we can just use the
6873   // builder. At this point we generate the predication tree. There may be
6874   // duplications since this is a simple recursive scan, but future
6875   // optimizations will clean it up.
6876 
6877   SmallVector<VPValue *, 2> Operands;
6878   unsigned NumIncoming = Phi->getNumIncomingValues();
6879   for (unsigned In = 0; In < NumIncoming; In++) {
6880     VPValue *EdgeMask =
6881       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
6882     assert((EdgeMask || NumIncoming == 1) &&
6883            "Multiple predecessors with one having a full mask");
6884     Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In)));
6885     if (EdgeMask)
6886       Operands.push_back(EdgeMask);
6887   }
6888   return new VPBlendRecipe(Phi, Operands);
6889 }
6890 
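// Build a recipe widening a call, either through a vector library function or
// a vector intrinsic, when doing so is possible and profitable for the entire
// (clamped) VF range.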
6891 VPWidenCallRecipe *
6892 VPRecipeBuilder::tryToWidenCall(Instruction *I, VFRange &Range, VPlan &Plan) {
6893 
6894   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
6895       [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range);
6896 
6897   CallInst *CI = dyn_cast<CallInst>(I);
6898   if (IsPredicated || !CI)
6899     return nullptr;
6900 
6901   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6902   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
6903              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect))
6904     return nullptr;
6905 
6906   auto willWiden = [&](unsigned VF) -> bool {
6907     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6908     // The following case may be scalarized depending on the VF.
    // The flag indicates whether we use an intrinsic or a regular call for the
    // vectorized version of the instruction.
6911     // Is it beneficial to perform intrinsic call compared to lib call?
6912     bool NeedToScalarize = false;
6913     unsigned CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
6914     bool UseVectorIntrinsic =
6915         ID && CM.getVectorIntrinsicCost(CI, VF) <= CallCost;
6916     return UseVectorIntrinsic || !NeedToScalarize;
6917   };
6918 
6919   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
6920     return nullptr;
6921 
6922   // Success: widen this call.
6923   auto VPValues = map_range(CI->arg_operands(), [&Plan](Value *Op) {
6924     return Plan.getOrAddVPValue(Op);
6925   });
6926 
6927   return new VPWidenCallRecipe(*CI, VPValues);
6928 }
6929 
6930 VPWidenSelectRecipe *VPRecipeBuilder::tryToWidenSelect(Instruction *I,
6931                                                        VFRange &Range) {
6932   auto *SI = dyn_cast<SelectInst>(I);
6933   if (!SI)
6934     return nullptr;
6935 
6936   // SI should be widened, unless it is scalar after vectorization,
  // scalarization is profitable, or it is predicated.
6938   auto willWiden = [this, SI](unsigned VF) -> bool {
6939     return !CM.isScalarAfterVectorization(SI, VF) &&
6940            !CM.isProfitableToScalarize(SI, VF) &&
6941            !CM.isScalarWithPredication(SI, VF);
6942   };
6943   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
6944     return nullptr;
6945 
6946   auto *SE = PSE.getSE();
6947   bool InvariantCond =
6948       SE->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
6949   // Success: widen this instruction.
6950   return new VPWidenSelectRecipe(*SI, InvariantCond);
6951 }
6952 
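// Fall-back widening: check whether the instruction can be handled by the
// generic VPWidenRecipe, i.e. by simply replacing its scalar operands with
// vector operands, for the entire (clamped) VF range.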
6953 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VFRange &Range) {
6954   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
6955       [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range);
6956 
6957   if (IsPredicated)
6958     return nullptr;
6959 
6960   auto IsVectorizableOpcode = [](unsigned Opcode) {
6961     switch (Opcode) {
6962     case Instruction::Add:
6963     case Instruction::And:
6964     case Instruction::AShr:
6965     case Instruction::BitCast:
6966     case Instruction::Br:
6967     case Instruction::FAdd:
6968     case Instruction::FCmp:
6969     case Instruction::FDiv:
6970     case Instruction::FMul:
6971     case Instruction::FNeg:
6972     case Instruction::FPExt:
6973     case Instruction::FPToSI:
6974     case Instruction::FPToUI:
6975     case Instruction::FPTrunc:
6976     case Instruction::FRem:
6977     case Instruction::FSub:
6978     case Instruction::ICmp:
6979     case Instruction::IntToPtr:
6980     case Instruction::Load:
6981     case Instruction::LShr:
6982     case Instruction::Mul:
6983     case Instruction::Or:
6984     case Instruction::PHI:
6985     case Instruction::PtrToInt:
6986     case Instruction::SDiv:
6987     case Instruction::Select:
6988     case Instruction::SExt:
6989     case Instruction::Shl:
6990     case Instruction::SIToFP:
6991     case Instruction::SRem:
6992     case Instruction::Store:
6993     case Instruction::Sub:
6994     case Instruction::Trunc:
6995     case Instruction::UDiv:
6996     case Instruction::UIToFP:
6997     case Instruction::URem:
6998     case Instruction::Xor:
6999     case Instruction::ZExt:
7000       return true;
7001     }
7002     return false;
7003   };
7004 
7005   if (!IsVectorizableOpcode(I->getOpcode()))
7006     return nullptr;
7007 
7008   auto willWiden = [&](unsigned VF) -> bool {
7009     if (!isa<PHINode>(I) && (CM.isScalarAfterVectorization(I, VF) ||
7010                              CM.isProfitableToScalarize(I, VF)))
7011       return false;
7012     if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
7013       assert(CM.getWideningDecision(I, VF) ==
7014                  LoopVectorizationCostModel::CM_Scalarize &&
7015              "Memory widening decisions should have been taken care by now");
7016       return false;
7017     }
7018     return true;
7019   };
7020 
7021   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
7022     return nullptr;
7023 
7024   // Success: widen this instruction.
7025   return new VPWidenRecipe(*I);
7026 }
7027 
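// Replicate an instruction that will not be widened: generate scalar copies
// per lane (or only for the first lane when the result is uniform), wrapping
// them in a predicated if-then region when predication is required.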
7028 VPBasicBlock *VPRecipeBuilder::handleReplication(
7029     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
7030     DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
7031     VPlanPtr &Plan) {
7032   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
7033       [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); },
7034       Range);
7035 
7036   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
7037       [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range);
7038 
7039   auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated);
7040   setRecipe(I, Recipe);
7041 
7042   // Find if I uses a predicated instruction. If so, it will use its scalar
7043   // value. Avoid hoisting the insert-element which packs the scalar value into
7044   // a vector value, as that happens iff all users use the vector value.
7045   for (auto &Op : I->operands())
7046     if (auto *PredInst = dyn_cast<Instruction>(Op))
7047       if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
7048         PredInst2Recipe[PredInst]->setAlsoPack(false);
7049 
7050   // Finalize the recipe for Instr, first if it is not predicated.
7051   if (!IsPredicated) {
7052     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
7053     VPBB->appendRecipe(Recipe);
7054     return VPBB;
7055   }
7056   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
7057   assert(VPBB->getSuccessors().empty() &&
7058          "VPBB has successors when handling predicated replication.");
7059   // Record predicated instructions for above packing optimizations.
7060   PredInst2Recipe[I] = Recipe;
7061   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
7062   VPBlockUtils::insertBlockAfter(Region, VPBB);
7063   auto *RegSucc = new VPBasicBlock();
7064   VPBlockUtils::insertBlockAfter(RegSucc, Region);
7065   return RegSucc;
7066 }
7067 
7068 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
7069                                                       VPRecipeBase *PredRecipe,
7070                                                       VPlanPtr &Plan) {
7071   // Instructions marked for predication are replicated and placed under an
7072   // if-then construct to prevent side-effects.
7073 
7074   // Generate recipes to compute the block mask for this region.
7075   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
7076 
7077   // Build the triangular if-then region.
7078   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
7079   assert(Instr->getParent() && "Predicated instruction not in any basic block");
7080   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
7081   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
7082   auto *PHIRecipe =
7083       Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
7084   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
7085   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
7086   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
7087 
7088   // Note: first set Entry as region entry and then connect successors starting
7089   // from it in order, to propagate the "parent" of each VPBasicBlock.
7090   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
7091   VPBlockUtils::connectBlocks(Pred, Exit);
7092 
7093   return Region;
7094 }
7095 
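// Try the specialized recipe builders in turn, then GEP widening, and finally
// the generic widening recipe. Returns false when no widening recipe applies
// and the instruction must instead be replicated.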
7096 bool VPRecipeBuilder::tryToCreateRecipe(Instruction *Instr, VFRange &Range,
7097                                         VPlanPtr &Plan, VPBasicBlock *VPBB) {
7098   VPRecipeBase *Recipe = nullptr;
7099 
7100   // First, check for specific widening recipes that deal with calls, memory
7101   // operations, inductions and Phi nodes.
7102   if ((Recipe = tryToWidenCall(Instr, Range, *Plan)) ||
7103       (Recipe = tryToWidenMemory(Instr, Range, Plan)) ||
7104       (Recipe = tryToWidenSelect(Instr, Range)) ||
7105       (Recipe = tryToOptimizeInduction(Instr, Range)) ||
7106       (Recipe = tryToBlend(Instr, Plan)) ||
7107       (isa<PHINode>(Instr) &&
7108        (Recipe = new VPWidenPHIRecipe(cast<PHINode>(Instr))))) {
7109     setRecipe(Instr, Recipe);
7110     VPBB->appendRecipe(Recipe);
7111     return true;
7112   }
7113 
7114   // Handle GEP widening.
7115   if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Instr)) {
7116     auto Scalarize = [&](unsigned VF) {
7117       return CM.isScalarWithPredication(Instr, VF) ||
7118              CM.isScalarAfterVectorization(Instr, VF) ||
7119              CM.isProfitableToScalarize(Instr, VF);
7120     };
7121     if (LoopVectorizationPlanner::getDecisionAndClampRange(Scalarize, Range))
7122       return false;
7123     VPWidenGEPRecipe *Recipe = new VPWidenGEPRecipe(GEP, OrigLoop);
7124     setRecipe(Instr, Recipe);
7125     VPBB->appendRecipe(Recipe);
7126     return true;
7127   }
7128 
7129   // Check if Instr is to be widened by a general VPWidenRecipe, after
7130   // having first checked for specific widening recipes.
7131   if ((Recipe = tryToWiden(Instr, Range))) {
7132     setRecipe(Instr, Recipe);
7133     VPBB->appendRecipe(Recipe);
7134     return true;
7135   }
7136 
7137   return false;
7138 }
7139 
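// Build VPlans with recipes for all power-of-two VFs in [MinVF, MaxVF], after
// collecting the values that need VPlan defs, the trivially dead
// instructions, and the sink-after constraints the plans must honor.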
7140 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF,
7141                                                         unsigned MaxVF) {
7142   assert(OrigLoop->empty() && "Inner loop expected.");
7143 
7144   // Collect conditions feeding internal conditional branches; they need to be
7145   // represented in VPlan for it to model masking.
7146   SmallPtrSet<Value *, 1> NeedDef;
7147 
7148   auto *Latch = OrigLoop->getLoopLatch();
7149   for (BasicBlock *BB : OrigLoop->blocks()) {
7150     if (BB == Latch)
7151       continue;
7152     BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
7153     if (Branch && Branch->isConditional())
7154       NeedDef.insert(Branch->getCondition());
7155   }
7156 
  // If the tail is to be folded by masking, the primary induction variable, if
  // it exists, needs to be represented in VPlan for it to model early-exit
  // masking.
7159   // Also, both the Phi and the live-out instruction of each reduction are
7160   // required in order to introduce a select between them in VPlan.
7161   if (CM.foldTailByMasking()) {
7162     if (Legal->getPrimaryInduction())
7163       NeedDef.insert(Legal->getPrimaryInduction());
7164     for (auto &Reduction : Legal->getReductionVars()) {
7165       NeedDef.insert(Reduction.first);
7166       NeedDef.insert(Reduction.second.getLoopExitInstr());
7167     }
7168   }
7169 
7170   // Collect instructions from the original loop that will become trivially dead
7171   // in the vectorized loop. We don't need to vectorize these instructions. For
7172   // example, original induction update instructions can become dead because we
7173   // separately emit induction "steps" when generating code for the new loop.
7174   // Similarly, we create a new latch condition when setting up the structure
7175   // of the new loop, so the old one can become dead.
7176   SmallPtrSet<Instruction *, 4> DeadInstructions;
7177   collectTriviallyDeadInstructions(DeadInstructions);
7178 
7179   // Add assume instructions we need to drop to DeadInstructions, to prevent
7180   // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
7182   // control flow is preserved, we should keep them.
7183   auto &ConditionalAssumes = Legal->getConditionalAssumes();
7184   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
7185 
7186   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
7187   // Dead instructions do not need sinking. Remove them from SinkAfter.
7188   for (Instruction *I : DeadInstructions)
7189     SinkAfter.erase(I);
7190 
7191   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
7192     VFRange SubRange = {VF, MaxVF + 1};
7193     VPlans.push_back(buildVPlanWithVPRecipes(SubRange, NeedDef,
7194                                              DeadInstructions, SinkAfter));
7195     VF = SubRange.End;
7196   }
7197 }
7198 
7199 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
7200     VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef,
7201     SmallPtrSetImpl<Instruction *> &DeadInstructions,
7202     const DenseMap<Instruction *, Instruction *> &SinkAfter) {
7203 
7204   // Hold a mapping from predicated instructions to their recipes, in order to
7205   // fix their AlsoPack behavior if a user is determined to replicate and use a
7206   // scalar instead of vector value.
7207   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
7208 
7209   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
7210 
7211   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
7212 
7213   // ---------------------------------------------------------------------------
7214   // Pre-construction: record ingredients whose recipes we'll need to further
7215   // process after constructing the initial VPlan.
7216   // ---------------------------------------------------------------------------
7217 
7218   // Mark instructions we'll need to sink later and their targets as
7219   // ingredients whose recipe we'll need to record.
7220   for (auto &Entry : SinkAfter) {
7221     RecipeBuilder.recordRecipeOf(Entry.first);
7222     RecipeBuilder.recordRecipeOf(Entry.second);
7223   }
7224 
7225   // For each interleave group which is relevant for this (possibly trimmed)
7226   // Range, add it to the set of groups to be later applied to the VPlan and add
7227   // placeholders for its members' Recipes which we'll be replacing with a
7228   // single VPInterleaveRecipe.
7229   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
7230     auto applyIG = [IG, this](unsigned VF) -> bool {
7231       return (VF >= 2 && // Query is illegal for VF == 1
7232               CM.getWideningDecision(IG->getInsertPos(), VF) ==
7233                   LoopVectorizationCostModel::CM_Interleave);
7234     };
7235     if (!getDecisionAndClampRange(applyIG, Range))
7236       continue;
7237     InterleaveGroups.insert(IG);
7238     for (unsigned i = 0; i < IG->getFactor(); i++)
7239       if (Instruction *Member = IG->getMember(i))
7240         RecipeBuilder.recordRecipeOf(Member);
  }
7242 
7243   // ---------------------------------------------------------------------------
7244   // Build initial VPlan: Scan the body of the loop in a topological order to
7245   // visit each basic block after having visited its predecessor basic blocks.
7246   // ---------------------------------------------------------------------------
7247 
7248   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
7249   auto Plan = std::make_unique<VPlan>();
7250   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
7251   Plan->setEntry(VPBB);
7252 
7253   // Represent values that will have defs inside VPlan.
7254   for (Value *V : NeedDef)
7255     Plan->addVPValue(V);
7256 
7257   // Scan the body of the loop in a topological order to visit each basic block
7258   // after having visited its predecessor basic blocks.
7259   LoopBlocksDFS DFS(OrigLoop);
7260   DFS.perform(LI);
7261 
7262   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
7263     // Relevant instructions from basic block BB will be grouped into VPRecipe
7264     // ingredients and fill a new VPBasicBlock.
7265     unsigned VPBBsForBB = 0;
7266     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
7267     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
7268     VPBB = FirstVPBBForBB;
7269     Builder.setInsertPoint(VPBB);
7270 
7271     // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
7273     for (Instruction &I : BB->instructionsWithoutDebug()) {
7274       Instruction *Instr = &I;
7275 
7276       // First filter out irrelevant instructions, to ensure no recipes are
7277       // built for them.
7278       if (isa<BranchInst>(Instr) ||
7279           DeadInstructions.find(Instr) != DeadInstructions.end())
7280         continue;
7281 
7282       if (RecipeBuilder.tryToCreateRecipe(Instr, Range, Plan, VPBB))
7283         continue;
7284 
7285       // Otherwise, if all widening options failed, Instruction is to be
7286       // replicated. This may create a successor for VPBB.
7287       VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
7288           Instr, Range, VPBB, PredInst2Recipe, Plan);
7289       if (NextVPBB != VPBB) {
7290         VPBB = NextVPBB;
7291         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
7292                                     : "");
7293       }
7294     }
7295   }
7296 
  // Discard the empty dummy pre-entry VPBasicBlock. Note that other
  // VPBasicBlocks may also be empty, such as the last one (VPBB), reflecting
  // original basic blocks with no recipes.
7300   VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
7301   assert(PreEntry->empty() && "Expecting empty pre-entry block.");
7302   VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
7303   VPBlockUtils::disconnectBlocks(PreEntry, Entry);
7304   delete PreEntry;
7305 
7306   // ---------------------------------------------------------------------------
7307   // Transform initial VPlan: Apply previously taken decisions, in order, to
7308   // bring the VPlan to its final state.
7309   // ---------------------------------------------------------------------------
7310 
7311   // Apply Sink-After legal constraints.
7312   for (auto &Entry : SinkAfter) {
7313     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
7314     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
7315     Sink->moveAfter(Target);
7316   }
7317 
7318   // Interleave memory: for each Interleave Group we marked earlier as relevant
7319   // for this VPlan, replace the Recipes widening its memory instructions with a
7320   // single VPInterleaveRecipe at its insertion point.
7321   for (auto IG : InterleaveGroups) {
7322     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
7323         RecipeBuilder.getRecipe(IG->getInsertPos()));
7324     (new VPInterleaveRecipe(IG, Recipe->getAddr(), Recipe->getMask()))
7325         ->insertBefore(Recipe);
7326 
7327     for (unsigned i = 0; i < IG->getFactor(); ++i)
7328       if (Instruction *Member = IG->getMember(i)) {
7329         RecipeBuilder.getRecipe(Member)->eraseFromParent();
7330       }
7331   }
7332 
7333   // Finally, if the tail is folded by masking, introduce selects between the
7334   // phi and the live-out value of each reduction, at the end of the latch.
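  // The select yields the phi (pre-iteration) value for lanes that are masked
  // off, so folded-tail iterations do not contribute to the reduction result.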
7335   if (CM.foldTailByMasking()) {
7336     Builder.setInsertPoint(VPBB);
7337     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
7338     for (auto &Reduction : Legal->getReductionVars()) {
7339       VPValue *Phi = Plan->getVPValue(Reduction.first);
7340       VPValue *Red = Plan->getVPValue(Reduction.second.getLoopExitInstr());
7341       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
7342     }
7343   }
7344 
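  // Name the plan after the VFs it covers; e.g. a range of [4, 16) produces
  // "Initial VPlan for VF={4,8},UF>=1".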
7345   std::string PlanName;
7346   raw_string_ostream RSO(PlanName);
7347   unsigned VF = Range.Start;
7348   Plan->addVF(VF);
7349   RSO << "Initial VPlan for VF={" << VF;
7350   for (VF *= 2; VF < Range.End; VF *= 2) {
7351     Plan->addVF(VF);
7352     RSO << "," << VF;
7353   }
7354   RSO << "},UF>=1";
7355   RSO.flush();
7356   Plan->setName(PlanName);
7357 
7358   return Plan;
7359 }
7360 
7361 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
7362   // Outer loop handling: outer loops may require CFG and instruction-level
7363   // transformations before we can even evaluate whether vectorization is
7364   // profitable. Since we cannot modify the incoming IR, we need to build
7365   // VPlan upfront in the vectorization pipeline.
7366   assert(!OrigLoop->empty() && "Expected an outer loop.");
7367   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7368 
7369   // Create new empty VPlan
7370   auto Plan = std::make_unique<VPlan>();
7371 
7372   // Build hierarchical CFG
7373   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
7374   HCFGBuilder.buildHierarchicalCFG();
7375 
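  // Record all candidate power-of-two VFs in the plan; e.g. a range of [2, 8)
  // adds VFs 2 and 4.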
7376   for (unsigned VF = Range.Start; VF < Range.End; VF *= 2)
7377     Plan->addVF(VF);
7378 
7379   if (EnableVPlanPredication) {
7380     VPlanPredicator VPP(*Plan);
7381     VPP.predicate();
7382 
7383     // Avoid running transformation to recipes until masked code generation in
7384     // VPlan-native path is in place.
7385     return Plan;
7386   }
7387 
7388   SmallPtrSet<Instruction *, 1> DeadInstructions;
7389   VPlanTransforms::VPInstructionsToVPRecipes(
7390       OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
7391   return Plan;
7392 }
7393 
7394 Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateVectorValues(
7395     Value *V, unsigned Part) {
7396   return ILV.getOrCreateVectorValue(V, Part);
7397 }
7398 
7399 Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue(
7400     Value *V, const VPIteration &Instance) {
7401   return ILV.getOrCreateScalarValue(V, Instance);
7402 }
7403 
7404 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
7405                                VPSlotTracker &SlotTracker) const {
7406   O << " +\n"
7407     << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
7408   IG->getInsertPos()->printAsOperand(O, false);
7409   O << ", ";
7410   getAddr()->printAsOperand(O, SlotTracker);
7411   VPValue *Mask = getMask();
7412   if (Mask) {
7413     O << ", ";
7414     Mask->printAsOperand(O, SlotTracker);
7415   }
7416   O << "\\l\"";
7417   for (unsigned i = 0; i < IG->getFactor(); ++i)
7418     if (Instruction *I = IG->getMember(i))
7419       O << " +\n"
7420         << Indent << "\"  " << VPlanIngredient(I) << " " << i << "\\l\"";
7421 }
7422 
7423 void VPWidenCallRecipe::execute(VPTransformState &State) {
7424   State.ILV->widenCallInstruction(Ingredient, User, State);
7425 }
7426 
7427 void VPWidenSelectRecipe::execute(VPTransformState &State) {
7428   State.ILV->widenSelectInstruction(Ingredient, InvariantCond);
7429 }
7430 
7431 void VPWidenRecipe::execute(VPTransformState &State) {
7432   State.ILV->widenInstruction(Ingredient);
7433 }
7434 
7435 void VPWidenGEPRecipe::execute(VPTransformState &State) {
7436   State.ILV->widenGEP(GEP, State.UF, State.VF, IsPtrLoopInvariant,
7437                       IsIndexLoopInvariant);
7438 }
7439 
7440 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
7441   assert(!State.Instance && "Int or FP induction being replicated.");
7442   State.ILV->widenIntOrFpInduction(IV, Trunc);
7443 }
7444 
7445 void VPWidenPHIRecipe::execute(VPTransformState &State) {
7446   State.ILV->widenPHIInstruction(Phi, State.UF, State.VF);
7447 }
7448 
7449 void VPBlendRecipe::execute(VPTransformState &State) {
7450   State.ILV->setDebugLocFromInst(State.Builder, Phi);
7451   // We know that all PHIs in non-header blocks are converted into
7452   // selects, so we don't have to worry about the insertion order and we
7453   // can just use the builder.
7454   // At this point we generate the predication tree. There may be
7455   // duplications since this is a simple recursive scan, but future
7456   // optimizations will clean it up.
7457 
7458   unsigned NumIncoming = getNumIncomingValues();
7459 
7460   // Generate a sequence of selects of the form:
7461   // SELECT(Mask3, In3,
7462   //      SELECT(Mask2, In2,
7463   //                   ( ...)))
7464   InnerLoopVectorizer::VectorParts Entry(State.UF);
7465   for (unsigned In = 0; In < NumIncoming; ++In) {
7466     for (unsigned Part = 0; Part < State.UF; ++Part) {
7467       // We might have single-edge PHIs (single-predecessor blocks); use an
7468       // identity 'select' for the first PHI operand.
7469       Value *In0 = State.get(getIncomingValue(In), Part);
7470       if (In == 0)
7471         Entry[Part] = In0; // Initialize with the first incoming value.
7472       else {
7473         // Select between the current value and the previous incoming edge
7474         // based on the incoming mask.
7475         Value *Cond = State.get(getMask(In), Part);
7476         Entry[Part] =
7477             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
7478       }
7479     }
7480   }
7481   for (unsigned Part = 0; Part < State.UF; ++Part)
7482     State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
7483 }
7484 
7485 void VPInterleaveRecipe::execute(VPTransformState &State) {
7486   assert(!State.Instance && "Interleave group being replicated.");
7487   State.ILV->vectorizeInterleaveGroup(IG, State, getAddr(), getMask());
7488 }
7489 
7490 void VPReplicateRecipe::execute(VPTransformState &State) {
7491   if (State.Instance) { // Generate a single instance.
7492     State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
7493     // Insert the scalar instance, packing it into a vector.
7494     if (AlsoPack && State.VF > 1) {
7495       // If we're constructing lane 0, initialize to start from undef.
7496       if (State.Instance->Lane == 0) {
7497         Value *Undef =
7498             UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
7499         State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
7500       }
7501       State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
7502     }
7503     return;
7504   }
7505 
7506   // Generate scalar instances for all VF lanes of all UF parts, unless the
7507   // instruction is uniform, in which case generate only the first lane for
7508   // each of the UF parts.
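  // (An instruction is uniform here when all lanes of a part would compute
  // the same scalar value, e.g. an address that does not depend on the lane.)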
7509   unsigned EndLane = IsUniform ? 1 : State.VF;
7510   for (unsigned Part = 0; Part < State.UF; ++Part)
7511     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
7512       State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
7513 }
7514 
7515 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
7516   assert(State.Instance && "Branch on Mask works only on single instance.");
7517 
7518   unsigned Part = State.Instance->Part;
7519   unsigned Lane = State.Instance->Lane;
7520 
7521   Value *ConditionBit = nullptr;
7522   if (!User) // Block in mask is all-one.
7523     ConditionBit = State.Builder.getTrue();
7524   else {
7525     VPValue *BlockInMask = User->getOperand(0);
7526     ConditionBit = State.get(BlockInMask, Part);
7527     if (ConditionBit->getType()->isVectorTy())
7528       ConditionBit = State.Builder.CreateExtractElement(
7529           ConditionBit, State.Builder.getInt32(Lane));
7530   }
7531 
7532   // Replace the temporary unreachable terminator with a new conditional branch,
7533   // whose two destinations will be set later when they are created.
7534   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
7535   assert(isa<UnreachableInst>(CurrentTerminator) &&
7536          "Expected to replace unreachable terminator with conditional branch.");
7537   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
7538   CondBr->setSuccessor(0, nullptr);
7539   ReplaceInstWithInst(CurrentTerminator, CondBr);
7540 }
7541 
7542 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
7543   assert(State.Instance && "Predicated instruction PHI works per instance.");
7544   Instruction *ScalarPredInst = cast<Instruction>(
7545       State.ValueMap.getScalarValue(PredInst, *State.Instance));
7546   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
7547   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
7548   assert(PredicatingBB && "Predicated block has no single predecessor.");
7549 
7550   // By current pack/unpack logic we need to generate only a single phi node: if
7551   // a vector value for the predicated instruction exists at this point it means
7552   // the instruction has vector users only, and a phi for the vector value is
7553   // needed. In this case the recipe of the predicated instruction is marked to
7554   // also do that packing, thereby "hoisting" the insert-element sequence.
7555   // Otherwise, a phi node for the scalar value is needed.
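  // For example, in the vector case this produces (names illustrative):
  //   %vphi = phi <VF x Ty> [ %vec.before.insert, %predicating.bb ],
  //                         [ %vec.with.insert, %predicated.bb ]
  // and the cached vector value for this part is updated to %vphi.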
7556   unsigned Part = State.Instance->Part;
7557   if (State.ValueMap.hasVectorValue(PredInst, Part)) {
7558     Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
7559     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
7560     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
7561     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
7562     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
7563     State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
7564   } else {
7565     Type *PredInstType = PredInst->getType();
7566     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
7567     Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
7568     Phi->addIncoming(ScalarPredInst, PredicatedBB);
7569     State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
7570   }
7571 }
7572 
7573 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
7574   VPValue *StoredValue = isa<StoreInst>(Instr) ? getStoredValue() : nullptr;
7575   State.ILV->vectorizeMemoryInstruction(&Instr, State, getAddr(), StoredValue,
7576                                         getMask());
7577 }
7578 
7579 // Determine how to lower the scalar epilogue, which depends on 1) optimizing
7580 // for minimum code size, 2) compiler options that enable or disable
7581 // predication, 3) loop hints forcing predication, and 4) a TTI hook that
7582 // analyzes whether the loop is suitable for predication.
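// Cases 1) and 2) are decisive. Cases 3) and 4) request folding the tail by
// predication; the TTI-based decision only applies when the loop hint does
// not explicitly disable predication.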
7583 static ScalarEpilogueLowering getScalarEpilogueLowering(
7584     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
7585     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
7586     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
7587     LoopVectorizationLegality &LVL) {
7588   bool OptSize =
7589       F->hasOptSize() || llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
7590                                                      PGSOQueryType::IRPass);
7591   // 1) OptSize takes precedence over all other options, i.e. if this is set,
7592   // don't look at hints or options, and don't request a scalar epilogue.
7593   if (OptSize && Hints.getForce() != LoopVectorizeHints::FK_Enabled)
7594     return CM_ScalarEpilogueNotAllowedOptSize;
7595 
7596   bool PredicateOptDisabled = PreferPredicateOverEpilog.getNumOccurrences() &&
7597                               !PreferPredicateOverEpilog;
7598 
7599   // 2) Next, if disabling predication is requested on the command line, honour
7600   // this and request a scalar epilogue.
7601   if (PredicateOptDisabled)
7602     return CM_ScalarEpilogueAllowed;
7603 
7604   // 3) and 4): if enabling predication is requested on the command line or
7605   // with a loop hint, or if the TTI hook indicates it is profitable, request
7606   // predication.
7607   if (PreferPredicateOverEpilog ||
7608       Hints.getPredicate() == LoopVectorizeHints::FK_Enabled ||
7609       (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
7610                                         LVL.getLAI()) &&
7611        Hints.getPredicate() != LoopVectorizeHints::FK_Disabled))
7612     return CM_ScalarEpilogueNotNeededUsePredicate;
7613 
7614   return CM_ScalarEpilogueAllowed;
7615 }
7616 
7617 // Process the loop in the VPlan-native vectorization path. This path builds
7618 // VPlan upfront in the vectorization pipeline, which allows applying
7619 // VPlan-to-VPlan transformations from the very beginning without modifying the
7620 // input LLVM IR.
7621 static bool processLoopInVPlanNativePath(
7622     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
7623     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
7624     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
7625     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
7626     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {
7627 
7628   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
7629   Function *F = L->getHeader()->getParent();
7630   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
7631 
7632   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
7633       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
7634 
7635   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
7636                                 &Hints, IAI);
7637   // Use the planner for outer loop vectorization.
7638   // TODO: CM is not used at this point inside the planner. Turn CM into an
7639   // optional argument if we don't need it in the future.
7640   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE);
7641 
7642   // Get user vectorization factor.
7643   const unsigned UserVF = Hints.getWidth();
7644 
7645   // Plan how to best vectorize, return the best VF and its cost.
7646   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
7647 
7648   // If we are stress testing VPlan builds, do not attempt to generate vector
7649   // code. Masked vector code generation support will follow soon.
7650   // Also, do not attempt to vectorize if no vector code will be produced.
7651   if (VPlanBuildStressTest || EnableVPlanPredication ||
7652       VectorizationFactor::Disabled() == VF)
7653     return false;
7654 
7655   LVP.setBestPlan(VF.Width, 1);
7656 
7657   InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
7658                          &CM);
7659   LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
7660                     << L->getHeader()->getParent()->getName() << "\"\n");
7661   LVP.executePlan(LB, DT);
7662 
7663   // Mark the loop as already vectorized to avoid vectorizing again.
7664   Hints.setAlreadyVectorized();
7665 
7666   LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
7667   return true;
7668 }
7669 
7670 bool LoopVectorizePass::processLoop(Loop *L) {
7671   assert((EnableVPlanNativePath || L->empty()) &&
7672          "VPlan-native path is not enabled. Only process inner loops.");
7673 
7674 #ifndef NDEBUG
7675   const std::string DebugLocStr = getDebugLocString(L);
7676 #endif /* NDEBUG */
7677 
7678   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
7679                     << L->getHeader()->getParent()->getName() << "\" from "
7680                     << DebugLocStr << "\n");
7681 
7682   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
7683 
7684   LLVM_DEBUG(
7685       dbgs() << "LV: Loop hints:"
7686              << " force="
7687              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
7688                      ? "disabled"
7689                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
7690                             ? "enabled"
7691                             : "?"))
7692              << " width=" << Hints.getWidth()
7693              << " unroll=" << Hints.getInterleave() << "\n");
7694 
7695   // Function containing loop
7696   Function *F = L->getHeader()->getParent();
7697 
7698   // Looking at the diagnostic output is the only way to determine if a loop
7699   // was vectorized (other than looking at the IR or machine code), so it
7700   // is important to generate an optimization remark for each loop. Most of
7701   // these messages are generated as OptimizationRemarkAnalysis. Remarks
7702   // generated as OptimizationRemark and OptimizationRemarkMissed are
7703   // less verbose and report vectorized loops and unvectorized loops that may
7704   // benefit from vectorization, respectively.
7705 
7706   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
7707     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
7708     return false;
7709   }
7710 
7711   PredicatedScalarEvolution PSE(*SE, *L);
7712 
7713   // Check if it is legal to vectorize the loop.
7714   LoopVectorizationRequirements Requirements(*ORE);
7715   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
7716                                 &Requirements, &Hints, DB, AC);
7717   if (!LVL.canVectorize(EnableVPlanNativePath)) {
7718     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
7719     Hints.emitRemarkWithHints();
7720     return false;
7721   }
7722 
7723   // Check the function attributes and profiles to find out if this function
7724   // should be optimized for size.
7725   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
7726       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
7727 
7728   // Entrance to the VPlan-native vectorization path. Outer loops are processed
7729   // here. They may require CFG and instruction level transformations before
7730   // even evaluating whether vectorization is profitable. Since we cannot modify
7731   // the incoming IR, we need to build VPlan upfront in the vectorization
7732   // pipeline.
7733   if (!L->empty())
7734     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
7735                                         ORE, BFI, PSI, Hints);
7736 
7737   assert(L->empty() && "Inner loop expected.");
7738 
7739   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
7740   // count by optimizing for size, to minimize overheads.
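  // "Tiny" is defined by TinyTripCountVectorThreshold; below it, a scalar
  // epilogue is not allowed unless vectorization is explicitly forced.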
7741   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
7742   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
7743     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
7744                       << "This loop is worth vectorizing only if no scalar "
7745                       << "iteration overheads are incurred.");
7746     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
7747       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
7748     else {
7749       LLVM_DEBUG(dbgs() << "\n");
7750       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
7751     }
7752   }
7753 
7754   // Check the function attributes to see if implicit floats are allowed.
7755   // FIXME: This check doesn't seem correct -- what if the loop is
7756   // an integer loop and the vector instructions selected are purely integer
7757   // vector instructions?
7758   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
7759     reportVectorizationFailure(
7760         "Can't vectorize when the NoImplicitFloat attribute is used",
7761         "loop not vectorized due to NoImplicitFloat attribute",
7762         "NoImplicitFloat", ORE, L);
7763     Hints.emitRemarkWithHints();
7764     return false;
7765   }
7766 
7767   // Check if the target supports potentially unsafe FP vectorization.
7768   // FIXME: Add a check for the type of safety issue (denormal, signaling)
7769   // for the target we're vectorizing for, to make sure none of the
7770   // additional fp-math flags can help.
7771   if (Hints.isPotentiallyUnsafe() &&
7772       TTI->isFPVectorizationPotentiallyUnsafe()) {
7773     reportVectorizationFailure(
7774         "Potentially unsafe FP op prevents vectorization",
7775         "loop not vectorized due to unsafe FP support.",
7776         "UnsafeFP", ORE, L);
7777     Hints.emitRemarkWithHints();
7778     return false;
7779   }
7780 
7781   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
7782   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
7783 
7784   // If an override option has been passed in for interleaved accesses, use it.
7785   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
7786     UseInterleaved = EnableInterleavedMemAccesses;
7787 
7788   // Analyze interleaved memory accesses.
7789   if (UseInterleaved) {
7790     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
7791   }
7792 
7793   // Use the cost model.
7794   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
7795                                 F, &Hints, IAI);
7796   CM.collectValuesToIgnore();
7797 
7798   // Use the planner for vectorization.
7799   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);
7800 
7801   // Get user vectorization factor.
7802   unsigned UserVF = Hints.getWidth();
7803 
7804   // Plan how to best vectorize, return the best VF and its cost.
7805   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF);
7806 
7807   VectorizationFactor VF = VectorizationFactor::Disabled();
7808   unsigned IC = 1;
7809   unsigned UserIC = Hints.getInterleave();
7810 
7811   if (MaybeVF) {
7812     VF = *MaybeVF;
7813     // Select the interleave count.
7814     IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
7815   }
7816 
7817   // Identify the diagnostic messages that should be produced.
7818   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
7819   bool VectorizeLoop = true, InterleaveLoop = true;
7820   if (Requirements.doesNotMeet(F, L, Hints)) {
7821     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
7822                          "requirements.\n");
7823     Hints.emitRemarkWithHints();
7824     return false;
7825   }
7826 
7827   if (VF.Width == 1) {
7828     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
7829     VecDiagMsg = std::make_pair(
7830         "VectorizationNotBeneficial",
7831         "the cost-model indicates that vectorization is not beneficial");
7832     VectorizeLoop = false;
7833   }
7834 
7835   if (!MaybeVF && UserIC > 1) {
7836     // Tell the user interleaving was avoided up-front, despite being explicitly
7837     // requested.
7838     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
7839                          "interleaving should be avoided up front\n");
7840     IntDiagMsg = std::make_pair(
7841         "InterleavingAvoided",
7842         "Ignoring UserIC, because interleaving was avoided up front");
7843     InterleaveLoop = false;
7844   } else if (IC == 1 && UserIC <= 1) {
7845     // Tell the user interleaving is not beneficial.
7846     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
7847     IntDiagMsg = std::make_pair(
7848         "InterleavingNotBeneficial",
7849         "the cost-model indicates that interleaving is not beneficial");
7850     InterleaveLoop = false;
7851     if (UserIC == 1) {
7852       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
7853       IntDiagMsg.second +=
7854           " and is explicitly disabled or interleave count is set to 1";
7855     }
7856   } else if (IC > 1 && UserIC == 1) {
7857     // Tell the user interleaving is beneficial, but it is explicitly disabled.
7858     LLVM_DEBUG(
7859         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
7860     IntDiagMsg = std::make_pair(
7861         "InterleavingBeneficialButDisabled",
7862         "the cost-model indicates that interleaving is beneficial "
7863         "but is explicitly disabled or interleave count is set to 1");
7864     InterleaveLoop = false;
7865   }
7866 
7867   // Override IC if user provided an interleave count.
7868   IC = UserIC > 0 ? UserIC : IC;
7869 
7870   // Emit diagnostic messages, if any.
7871   const char *VAPassName = Hints.vectorizeAnalysisPassName();
7872   if (!VectorizeLoop && !InterleaveLoop) {
7873     // Do not vectorize or interleave the loop.
7874     ORE->emit([&]() {
7875       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
7876                                       L->getStartLoc(), L->getHeader())
7877              << VecDiagMsg.second;
7878     });
7879     ORE->emit([&]() {
7880       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
7881                                       L->getStartLoc(), L->getHeader())
7882              << IntDiagMsg.second;
7883     });
7884     return false;
7885   } else if (!VectorizeLoop && InterleaveLoop) {
7886     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
7887     ORE->emit([&]() {
7888       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
7889                                         L->getStartLoc(), L->getHeader())
7890              << VecDiagMsg.second;
7891     });
7892   } else if (VectorizeLoop && !InterleaveLoop) {
7893     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
7894                       << ") in " << DebugLocStr << '\n');
7895     ORE->emit([&]() {
7896       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
7897                                         L->getStartLoc(), L->getHeader())
7898              << IntDiagMsg.second;
7899     });
7900   } else if (VectorizeLoop && InterleaveLoop) {
7901     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
7902                       << ") in " << DebugLocStr << '\n');
7903     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
7904   }
7905 
7906   LVP.setBestPlan(VF.Width, IC);
7907 
7908   using namespace ore;
7909   bool DisableRuntimeUnroll = false;
7910   MDNode *OrigLoopID = L->getLoopID();
7911 
7912   if (!VectorizeLoop) {
7913     assert(IC > 1 && "interleave count should not be 1 or 0");
7914     // If we decided not to vectorize the loop, interleave it instead, since
7915     // interleaving can still improve ILP.
7916     InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
7917                                &CM);
7918     LVP.executePlan(Unroller, DT);
7919 
7920     ORE->emit([&]() {
7921       return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
7922                                 L->getHeader())
7923              << "interleaved loop (interleaved count: "
7924              << NV("InterleaveCount", IC) << ")";
7925     });
7926   } else {
7927     // If we decided to vectorize the loop, then do it.
7928     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
7929                            &LVL, &CM);
7930     LVP.executePlan(LB, DT);
7931     ++LoopsVectorized;
7932 
7933     // Add metadata to disable runtime unrolling of the scalar loop when there
7934     // are no runtime checks about strides and memory. A scalar loop that is
7935     // rarely used is not worth unrolling.
7936     if (!LB.areSafetyChecksAdded())
7937       DisableRuntimeUnroll = true;
7938 
7939     // Report the vectorization decision.
7940     ORE->emit([&]() {
7941       return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
7942                                 L->getHeader())
7943              << "vectorized loop (vectorization width: "
7944              << NV("VectorizationFactor", VF.Width)
7945              << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
7946     });
7947   }
7948 
7949   Optional<MDNode *> RemainderLoopID =
7950       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7951                                       LLVMLoopVectorizeFollowupEpilogue});
7952   if (RemainderLoopID.hasValue()) {
7953     L->setLoopID(RemainderLoopID.getValue());
7954   } else {
7955     if (DisableRuntimeUnroll)
7956       AddRuntimeUnrollDisableMetaData(L);
7957 
7958     // Mark the loop as already vectorized to avoid vectorizing again.
7959     Hints.setAlreadyVectorized();
7960   }
7961 
7962   LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
7963   return true;
7964 }
7965 
7966 bool LoopVectorizePass::runImpl(
7967     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
7968     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
7969     DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
7970     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
7971     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
7972   SE = &SE_;
7973   LI = &LI_;
7974   TTI = &TTI_;
7975   DT = &DT_;
7976   BFI = &BFI_;
7977   TLI = TLI_;
7978   AA = &AA_;
7979   AC = &AC_;
7980   GetLAA = &GetLAA_;
7981   DB = &DB_;
7982   ORE = &ORE_;
7983   PSI = PSI_;
7984 
7985   // Don't attempt if
7986   // 1. the target claims to have no vector registers, and
7987   // 2. interleaving won't help ILP.
7988   //
7989   // The second condition is necessary because, even if the target has no
7990   // vector registers, loop vectorization may still enable scalar
7991   // interleaving.
7992   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
7993       TTI->getMaxInterleaveFactor(1) < 2)
7994     return false;
7995 
7996   bool Changed = false;
7997 
7998   // The vectorizer requires loops to be in simplified form.
7999   // Since simplification may add new inner loops, it has to run before the
8000   // legality and profitability checks. This means running the loop vectorizer
8001   // will simplify all loops, regardless of whether anything ends up being
8002   // vectorized.
8003   for (auto &L : *LI)
8004     Changed |=
8005         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
8006 
8007   // Build up a worklist of inner-loops to vectorize. This is necessary as
8008   // the act of vectorizing or partially unrolling a loop creates new loops
8009   // and can invalidate iterators across the loops.
8010   SmallVector<Loop *, 8> Worklist;
8011 
8012   for (Loop *L : *LI)
8013     collectSupportedLoops(*L, LI, ORE, Worklist);
8014 
8015   LoopsAnalyzed += Worklist.size();
8016 
8017   // Now walk the identified inner loops.
8018   while (!Worklist.empty()) {
8019     Loop *L = Worklist.pop_back_val();
8020 
8021     // For the inner loops we actually process, form LCSSA to simplify the
8022     // transform.
8023     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
8024 
8025     Changed |= processLoop(L);
8026   }
8027 
8028   // Process each loop nest in the function.
8029   return Changed;
8030 }
8031 
8032 PreservedAnalyses LoopVectorizePass::run(Function &F,
8033                                          FunctionAnalysisManager &AM) {
8034   auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
8035   auto &LI = AM.getResult<LoopAnalysis>(F);
8036   auto &TTI = AM.getResult<TargetIRAnalysis>(F);
8037   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
8038   auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
8039   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
8040   auto &AA = AM.getResult<AAManager>(F);
8041   auto &AC = AM.getResult<AssumptionAnalysis>(F);
8042   auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
8043   auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
8044   MemorySSA *MSSA = EnableMSSALoopDependency
8045                         ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
8046                         : nullptr;
8047 
8048   auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
8049   std::function<const LoopAccessInfo &(Loop &)> GetLAA =
8050       [&](Loop &L) -> const LoopAccessInfo & {
8051     LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, MSSA};
8052     return LAM.getResult<LoopAccessAnalysis>(L, AR);
8053   };
8054   const ModuleAnalysisManager &MAM =
8055       AM.getResult<ModuleAnalysisManagerFunctionProxy>(F).getManager();
8056   ProfileSummaryInfo *PSI =
8057       MAM.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
8058   bool Changed =
8059       runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
8060   if (!Changed)
8061     return PreservedAnalyses::all();
8062   PreservedAnalyses PA;
8063 
8064   // We currently do not preserve loopinfo/dominator analyses with outer loop
8065   // vectorization. Until this is addressed, mark these analyses as preserved
8066   // only for non-VPlan-native path.
8067   // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
8068   if (!EnableVPlanNativePath) {
8069     PA.preserve<LoopAnalysis>();
8070     PA.preserve<DominatorTreeAnalysis>();
8071   }
8072   PA.preserve<BasicAA>();
8073   PA.preserve<GlobalsAA>();
8074   return PA;
8075 }
8076