1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
11 // and generates target-independent LLVM-IR.
12 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
13 // of instructions in order to estimate the profitability of vectorization.
14 //
15 // The loop vectorizer combines consecutive loop iterations into a single
16 // 'wide' iteration. After this transformation the index is incremented
17 // by the SIMD vector width, and not by one.
18 //
19 // This pass has three parts:
20 // 1. The main loop pass that drives the different parts.
21 // 2. LoopVectorizationLegality - A unit that checks for the legality
22 //    of the vectorization.
23 // 3. InnerLoopVectorizer - A unit that performs the actual
24 //    widening of instructions.
25 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
26 //    of vectorization. It decides on the optimal vector width, which
27 //    can be one, if vectorization is not profitable.
28 //
29 //===----------------------------------------------------------------------===//
30 //
31 // The reduction-variable vectorization is based on the paper:
32 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
33 //
34 // Variable uniformity checks are inspired by:
35 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
36 //
37 // The interleaved access vectorization is based on the paper:
38 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
39 //  Data for SIMD
40 //
41 // Other ideas/concepts are from:
42 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
43 //
44 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
45 //  Vectorizing Compilers.
46 //
47 //===----------------------------------------------------------------------===//
48 
49 #include "llvm/ADT/DenseMap.h"
50 #include "llvm/ADT/Hashing.h"
51 #include "llvm/ADT/MapVector.h"
52 #include "llvm/ADT/SetVector.h"
53 #include "llvm/ADT/SmallPtrSet.h"
54 #include "llvm/ADT/SmallSet.h"
55 #include "llvm/ADT/SmallVector.h"
56 #include "llvm/ADT/Statistic.h"
57 #include "llvm/ADT/StringExtras.h"
58 #include "llvm/Analysis/AliasAnalysis.h"
59 #include "llvm/Analysis/AssumptionCache.h"
60 #include "llvm/Analysis/BasicAliasAnalysis.h"
61 #include "llvm/Analysis/BlockFrequencyInfo.h"
62 #include "llvm/Analysis/CodeMetrics.h"
63 #include "llvm/Analysis/DemandedBits.h"
64 #include "llvm/Analysis/GlobalsModRef.h"
65 #include "llvm/Analysis/LoopAccessAnalysis.h"
66 #include "llvm/Analysis/LoopInfo.h"
67 #include "llvm/Analysis/LoopIterator.h"
68 #include "llvm/Analysis/LoopPass.h"
69 #include "llvm/Analysis/ScalarEvolution.h"
70 #include "llvm/Analysis/ScalarEvolutionExpander.h"
71 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
72 #include "llvm/Analysis/TargetTransformInfo.h"
73 #include "llvm/Analysis/ValueTracking.h"
74 #include "llvm/Analysis/VectorUtils.h"
75 #include "llvm/IR/Constants.h"
76 #include "llvm/IR/DataLayout.h"
77 #include "llvm/IR/DebugInfo.h"
78 #include "llvm/IR/DerivedTypes.h"
79 #include "llvm/IR/DiagnosticInfo.h"
80 #include "llvm/IR/Dominators.h"
81 #include "llvm/IR/Function.h"
82 #include "llvm/IR/IRBuilder.h"
83 #include "llvm/IR/Instructions.h"
84 #include "llvm/IR/IntrinsicInst.h"
85 #include "llvm/IR/LLVMContext.h"
86 #include "llvm/IR/Module.h"
87 #include "llvm/IR/PatternMatch.h"
88 #include "llvm/IR/Type.h"
89 #include "llvm/IR/Value.h"
90 #include "llvm/IR/ValueHandle.h"
91 #include "llvm/IR/Verifier.h"
92 #include "llvm/Pass.h"
93 #include "llvm/Support/BranchProbability.h"
94 #include "llvm/Support/CommandLine.h"
95 #include "llvm/Support/Debug.h"
96 #include "llvm/Support/raw_ostream.h"
97 #include "llvm/Transforms/Scalar.h"
98 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
99 #include "llvm/Transforms/Utils/Local.h"
100 #include "llvm/Transforms/Utils/LoopUtils.h"
101 #include "llvm/Transforms/Utils/LoopVersioning.h"
102 #include "llvm/Transforms/Vectorize.h"
103 #include <algorithm>
104 #include <functional>
105 #include <map>
106 #include <tuple>
107 
108 using namespace llvm;
109 using namespace llvm::PatternMatch;
110 
111 #define LV_NAME "loop-vectorize"
112 #define DEBUG_TYPE LV_NAME
113 
114 STATISTIC(LoopsVectorized, "Number of loops vectorized");
115 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
116 
117 static cl::opt<bool>
118     EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
119                        cl::desc("Enable if-conversion during vectorization."));
120 
121 /// We don't vectorize loops with a known constant trip count below this number.
122 static cl::opt<unsigned> TinyTripCountVectorThreshold(
123     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
124     cl::desc("Don't vectorize loops with a constant "
125              "trip count that is smaller than this "
126              "value."));
127 
128 static cl::opt<bool> MaximizeBandwidth(
129     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
130     cl::desc("Maximize bandwidth when selecting vectorization factor which "
131              "will be determined by the smallest type in loop."));
132 
133 /// This enables versioning on the strides of symbolically striding memory
134 /// accesses in code like the following.
135 ///   for (i = 0; i < N; ++i)
136 ///     A[i * Stride1] += B[i * Stride2] ...
137 ///
138 /// Will be roughly translated to
139 ///    if (Stride1 == 1 && Stride2 == 1) {
140 ///      for (i = 0; i < N; i+=4)
141 ///       A[i:i+3] += ...
142 ///    } else
143 ///      ...
144 static cl::opt<bool> EnableMemAccessVersioning(
145     "enable-mem-access-versioning", cl::init(true), cl::Hidden,
146     cl::desc("Enable symbolic stride memory access versioning"));
147 
148 static cl::opt<bool> EnableInterleavedMemAccesses(
149     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
150     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
151 
152 /// Maximum factor for an interleaved memory access.
153 static cl::opt<unsigned> MaxInterleaveGroupFactor(
154     "max-interleave-group-factor", cl::Hidden,
155     cl::desc("Maximum factor for an interleaved access group (default = 8)"),
156     cl::init(8));
157 
158 /// We don't interleave loops with a known constant trip count below this
159 /// number.
160 static const unsigned TinyTripCountInterleaveThreshold = 128;
161 
162 static cl::opt<unsigned> ForceTargetNumScalarRegs(
163     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
164     cl::desc("A flag that overrides the target's number of scalar registers."));
165 
166 static cl::opt<unsigned> ForceTargetNumVectorRegs(
167     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
168     cl::desc("A flag that overrides the target's number of vector registers."));
169 
170 /// Maximum vectorization interleave count.
171 static const unsigned MaxInterleaveFactor = 16;
172 
173 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
174     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
175     cl::desc("A flag that overrides the target's max interleave factor for "
176              "scalar loops."));
177 
178 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
179     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
180     cl::desc("A flag that overrides the target's max interleave factor for "
181              "vectorized loops."));
182 
183 static cl::opt<unsigned> ForceTargetInstructionCost(
184     "force-target-instruction-cost", cl::init(0), cl::Hidden,
185     cl::desc("A flag that overrides the target's expected cost for "
186              "an instruction to a single constant value. Mostly "
187              "useful for getting consistent testing."));
188 
189 static cl::opt<unsigned> SmallLoopCost(
190     "small-loop-cost", cl::init(20), cl::Hidden,
191     cl::desc(
192         "The cost of a loop that is considered 'small' by the interleaver."));
193 
194 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
195     "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
196     cl::desc("Enable the use of the block frequency analysis to access PGO "
197              "heuristics minimizing code growth in cold regions and being more "
198              "aggressive in hot regions."));
199 
200 // Runtime interleave loops for load/store throughput.
201 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
202     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
203     cl::desc(
204         "Enable runtime interleaving until load/store ports are saturated"));
205 
206 /// The number of stores in a loop that are allowed to need predication.
207 static cl::opt<unsigned> NumberOfStoresToPredicate(
208     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
209     cl::desc("Max number of stores to be predicated behind an if."));
210 
211 static cl::opt<bool> EnableIndVarRegisterHeur(
212     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
213     cl::desc("Count the induction variable only once when interleaving"));
214 
215 static cl::opt<bool> EnableCondStoresVectorization(
216     "enable-cond-stores-vec", cl::init(false), cl::Hidden,
217     cl::desc("Enable if predication of stores during vectorization."));
218 
219 static cl::opt<unsigned> MaxNestedScalarReductionIC(
220     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
221     cl::desc("The maximum interleave count to use when interleaving a scalar "
222              "reduction in a nested loop."));
223 
224 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
225     "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
226     cl::desc("The maximum allowed number of runtime memory checks with a "
227              "vectorize(enable) pragma."));
228 
229 static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
230     "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
231     cl::desc("The maximum number of SCEV checks allowed."));
232 
233 static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
234     "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
235     cl::desc("The maximum number of SCEV checks allowed with a "
236              "vectorize(enable) pragma"));
237 
238 namespace {
239 
240 // Forward declarations.
241 class LoopVectorizeHints;
242 class LoopVectorizationLegality;
243 class LoopVectorizationCostModel;
244 class LoopVectorizationRequirements;
245 
246 /// \brief This modifies LoopAccessReport to initialize message with
247 /// loop-vectorizer-specific part.
248 class VectorizationReport : public LoopAccessReport {
249 public:
250   VectorizationReport(Instruction *I = nullptr)
251       : LoopAccessReport("loop not vectorized: ", I) {}
252 
253   /// \brief This allows promotion of the loop-access analysis report into the
254   /// loop-vectorizer report.  It modifies the message to add the
255   /// loop-vectorizer-specific part of the message.
256   explicit VectorizationReport(const LoopAccessReport &R)
257       : LoopAccessReport(Twine("loop not vectorized: ") + R.str(),
258                          R.getInstr()) {}
259 };
260 
261 /// A helper function for converting Scalar types to vector types.
262 /// If the incoming type is void, we return void. If the VF is 1, we return
263 /// the scalar type.
264 static Type *ToVectorTy(Type *Scalar, unsigned VF) {
265   if (Scalar->isVoidTy() || VF == 1)
266     return Scalar;
267   return VectorType::get(Scalar, VF);
268 }
269 
270 /// A helper function that returns GEP instruction and knows to skip a
271 /// 'bitcast'. The 'bitcast' may be skipped if the source and the destination
272 /// pointee types of the 'bitcast' have the same size.
273 /// For example:
274 ///   bitcast double** %var to i64* - can be skipped
275 ///   bitcast double** %var to i8*  - can not
276 static GetElementPtrInst *getGEPInstruction(Value *Ptr) {
277 
278   if (isa<GetElementPtrInst>(Ptr))
279     return cast<GetElementPtrInst>(Ptr);
280 
281   if (isa<BitCastInst>(Ptr) &&
282       isa<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0))) {
283     Type *BitcastTy = Ptr->getType();
284     Type *GEPTy = cast<BitCastInst>(Ptr)->getSrcTy();
285     if (!isa<PointerType>(BitcastTy) || !isa<PointerType>(GEPTy))
286       return nullptr;
287     Type *Pointee1Ty = cast<PointerType>(BitcastTy)->getPointerElementType();
288     Type *Pointee2Ty = cast<PointerType>(GEPTy)->getPointerElementType();
289     const DataLayout &DL = cast<BitCastInst>(Ptr)->getModule()->getDataLayout();
290     if (DL.getTypeSizeInBits(Pointee1Ty) == DL.getTypeSizeInBits(Pointee2Ty))
291       return cast<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0));
292   }
293   return nullptr;
294 }
295 
296 /// InnerLoopVectorizer vectorizes loops which contain only one basic
297 /// block to a specified vectorization factor (VF).
298 /// This class performs the widening of scalars into vectors, or multiple
299 /// scalars. This class also implements the following features:
300 /// * It inserts an epilogue loop for handling loops that don't have iteration
301 ///   counts that are known to be a multiple of the vectorization factor.
302 /// * It handles the code generation for reduction variables.
303 /// * Scalarization (implementation using scalars) of un-vectorizable
304 ///   instructions.
305 /// InnerLoopVectorizer does not perform any vectorization-legality
306 /// checks, and relies on the caller to check for the different legality
307 /// aspects. The InnerLoopVectorizer relies on the
308 /// LoopVectorizationLegality class to provide information about the induction
309 /// and reduction variables that were found to a given vectorization factor.
310 class InnerLoopVectorizer {
311 public:
312   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
313                       LoopInfo *LI, DominatorTree *DT,
314                       const TargetLibraryInfo *TLI,
315                       const TargetTransformInfo *TTI, AssumptionCache *AC,
316                       unsigned VecWidth, unsigned UnrollFactor)
317       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
318         AC(AC), VF(VecWidth), UF(UnrollFactor),
319         Builder(PSE.getSE()->getContext()), Induction(nullptr),
320         OldInduction(nullptr), WidenMap(UnrollFactor), TripCount(nullptr),
321         VectorTripCount(nullptr), Legal(nullptr), AddedSafetyChecks(false) {}
322 
323   // Perform the actual loop widening (vectorization).
324   // MinimumBitWidths maps scalar integer values to the smallest bitwidth they
325   // can be validly truncated to. The cost model has assumed this truncation
326   // will happen when vectorizing.
327   void vectorize(LoopVectorizationLegality *L,
328                  MapVector<Instruction *, uint64_t> MinimumBitWidths) {
329     MinBWs = MinimumBitWidths;
330     Legal = L;
331     // Create a new empty loop. Unlink the old loop and connect the new one.
332     createEmptyLoop();
333     // Widen each instruction in the old loop to a new one in the new loop.
334     // Use the Legality module to find the induction and reduction variables.
335     vectorizeLoop();
336   }
337 
338   // Return true if any runtime check is added.
339   bool IsSafetyChecksAdded() { return AddedSafetyChecks; }
340 
341   virtual ~InnerLoopVectorizer() {}
342 
343 protected:
344   /// A small list of PHINodes.
345   typedef SmallVector<PHINode *, 4> PhiVector;
346   /// When we unroll loops we have multiple vector values for each scalar.
347   /// This data structure holds the unrolled and vectorized values that
348   /// originated from one scalar instruction.
349   typedef SmallVector<Value *, 2> VectorParts;
350 
351   // When we if-convert we need to create edge masks. We have to cache values
352   // so that we don't end up with exponential recursion/IR.
353   typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
354       EdgeMaskCache;
355 
356   /// Create an empty loop, based on the loop ranges of the old loop.
357   void createEmptyLoop();
358   /// Create a new induction variable inside L.
359   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
360                                    Value *Step, Instruction *DL);
361   /// Copy and widen the instructions from the old loop.
362   virtual void vectorizeLoop();
363 
364   /// Fix a first-order recurrence. This is the second phase of vectorizing
365   /// this phi node.
366   void fixFirstOrderRecurrence(PHINode *Phi);
367 
368   /// \brief The Loop exit block may have single value PHI nodes where the
369   /// incoming value is 'Undef'. While vectorizing we only handled real values
370   /// that were defined inside the loop. Here we fix the 'undef case'.
371   /// See PR14725.
372   void fixLCSSAPHIs();
373 
374   /// Shrinks vector element sizes based on information in "MinBWs".
375   void truncateToMinimalBitwidths();
376 
377   /// A helper function that computes the predicate of the block BB, assuming
378   /// that the header block of the loop is set to True. It returns the *entry*
379   /// mask for the block BB.
380   VectorParts createBlockInMask(BasicBlock *BB);
381   /// A helper function that computes the predicate of the edge between SRC
382   /// and DST.
383   VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);
384 
385   /// A helper function to vectorize a single BB within the innermost loop.
386   void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV);
387 
388   /// Vectorize a single PHINode in a block. This method handles the induction
389   /// variable canonicalization. It supports both VF = 1 for unrolled loops and
390   /// arbitrary length vectors.
391   void widenPHIInstruction(Instruction *PN, VectorParts &Entry, unsigned UF,
392                            unsigned VF, PhiVector *PV);
393 
394   /// Insert the new loop to the loop hierarchy and pass manager
395   /// and update the analysis passes.
396   void updateAnalysis();
397 
398   /// This instruction is un-vectorizable. Implement it as a sequence
399   /// of scalars. If \p IfPredicateStore is true we need to 'hide' each
400   /// scalarized instruction behind an if block predicated on the control
401   /// dependence of the instruction.
402   virtual void scalarizeInstruction(Instruction *Instr,
403                                     bool IfPredicateStore = false);
404 
405   /// Vectorize Load and Store instructions,
406   virtual void vectorizeMemoryInstruction(Instruction *Instr);
407 
408   /// Create a broadcast instruction. This method generates a broadcast
409   /// instruction (shuffle) for loop invariant values and for the induction
410   /// value. If this is the induction variable then we extend it to N, N+1, ...
411   /// this is needed because each iteration in the loop corresponds to a SIMD
412   /// element.
413   virtual Value *getBroadcastInstrs(Value *V);
414 
415   /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
416   /// to each vector element of Val. The sequence starts at StartIndex.
417   virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step);
418 
419   /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
420   /// to each vector element of Val. The sequence starts at StartIndex.
421   /// Step is a SCEV. In order to get StepValue it takes the existing value
422   /// from SCEV or creates a new using SCEVExpander.
423   virtual Value *getStepVector(Value *Val, int StartIdx, const SCEV *Step);
424 
425   /// When we go over instructions in the basic block we rely on previous
426   /// values within the current basic block or on loop invariant values.
427   /// When we widen (vectorize) values we place them in the map. If the values
428   /// are not within the map, they have to be loop invariant, so we simply
429   /// broadcast them into a vector.
430   VectorParts &getVectorValue(Value *V);
431 
432   /// Try to vectorize the interleaved access group that \p Instr belongs to.
433   void vectorizeInterleaveGroup(Instruction *Instr);
434 
435   /// Generate a shuffle sequence that will reverse the vector Vec.
436   virtual Value *reverseVector(Value *Vec);
437 
438   /// Returns (and creates if needed) the original loop trip count.
439   Value *getOrCreateTripCount(Loop *NewLoop);
440 
441   /// Returns (and creates if needed) the trip count of the widened loop.
442   Value *getOrCreateVectorTripCount(Loop *NewLoop);
443 
444   /// Emit a bypass check to see if the trip count would overflow, or we
445   /// wouldn't have enough iterations to execute one vector loop.
446   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
447   /// Emit a bypass check to see if the vector trip count is nonzero.
448   void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass);
449   /// Emit a bypass check to see if all of the SCEV assumptions we've
450   /// had to make are correct.
451   void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
452   /// Emit bypass checks to check any memory assumptions we may have made.
453   void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
454 
455   /// Add additional metadata to \p To that was not present on \p Orig.
456   ///
457   /// Currently this is used to add the noalias annotations based on the
458   /// inserted memchecks.  Use this for instructions that are *cloned* into the
459   /// vector loop.
460   void addNewMetadata(Instruction *To, const Instruction *Orig);
461 
462   /// Add metadata from one instruction to another.
463   ///
464   /// This includes both the original MDs from \p From and additional ones (\see
465   /// addNewMetadata).  Use this for *newly created* instructions in the vector
466   /// loop.
467   void addMetadata(Instruction *To, const Instruction *From);
468 
469   /// \brief Similar to the previous function but it adds the metadata to a
470   /// vector of instructions.
471   void addMetadata(SmallVectorImpl<Value *> &To, const Instruction *From);
472 
473   /// This is a helper class that holds the vectorizer state. It maps scalar
474   /// instructions to vector instructions. When the code is 'unrolled' then
475   /// then a single scalar value is mapped to multiple vector parts. The parts
476   /// are stored in the VectorPart type.
477   struct ValueMap {
478     /// C'tor.  UnrollFactor controls the number of vectors ('parts') that
479     /// are mapped.
480     ValueMap(unsigned UnrollFactor) : UF(UnrollFactor) {}
481 
482     /// \return True if 'Key' is saved in the Value Map.
483     bool has(Value *Key) const { return MapStorage.count(Key); }
484 
485     /// Initializes a new entry in the map. Sets all of the vector parts to the
486     /// save value in 'Val'.
487     /// \return A reference to a vector with splat values.
488     VectorParts &splat(Value *Key, Value *Val) {
489       VectorParts &Entry = MapStorage[Key];
490       Entry.assign(UF, Val);
491       return Entry;
492     }
493 
494     ///\return A reference to the value that is stored at 'Key'.
495     VectorParts &get(Value *Key) {
496       VectorParts &Entry = MapStorage[Key];
497       if (Entry.empty())
498         Entry.resize(UF);
499       assert(Entry.size() == UF);
500       return Entry;
501     }
502 
503   private:
504     /// The unroll factor. Each entry in the map stores this number of vector
505     /// elements.
506     unsigned UF;
507 
508     /// Map storage. We use std::map and not DenseMap because insertions to a
509     /// dense map invalidates its iterators.
510     std::map<Value *, VectorParts> MapStorage;
511   };
512 
513   /// The original loop.
514   Loop *OrigLoop;
515   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
516   /// dynamic knowledge to simplify SCEV expressions and converts them to a
517   /// more usable form.
518   PredicatedScalarEvolution &PSE;
519   /// Loop Info.
520   LoopInfo *LI;
521   /// Dominator Tree.
522   DominatorTree *DT;
523   /// Alias Analysis.
524   AliasAnalysis *AA;
525   /// Target Library Info.
526   const TargetLibraryInfo *TLI;
527   /// Target Transform Info.
528   const TargetTransformInfo *TTI;
529   /// Assumption Cache.
530   AssumptionCache *AC;
531 
532   /// \brief LoopVersioning.  It's only set up (non-null) if memchecks were
533   /// used.
534   ///
535   /// This is currently only used to add no-alias metadata based on the
536   /// memchecks.  The actually versioning is performed manually.
537   std::unique_ptr<LoopVersioning> LVer;
538 
539   /// The vectorization SIMD factor to use. Each vector will have this many
540   /// vector elements.
541   unsigned VF;
542 
543 protected:
544   /// The vectorization unroll factor to use. Each scalar is vectorized to this
545   /// many different vector instructions.
546   unsigned UF;
547 
548   /// The builder that we use
549   IRBuilder<> Builder;
550 
551   // --- Vectorization state ---
552 
553   /// The vector-loop preheader.
554   BasicBlock *LoopVectorPreHeader;
555   /// The scalar-loop preheader.
556   BasicBlock *LoopScalarPreHeader;
557   /// Middle Block between the vector and the scalar.
558   BasicBlock *LoopMiddleBlock;
559   /// The ExitBlock of the scalar loop.
560   BasicBlock *LoopExitBlock;
561   /// The vector loop body.
562   BasicBlock *LoopVectorBody;
563   /// The scalar loop body.
564   BasicBlock *LoopScalarBody;
565   /// A list of all bypass blocks. The first block is the entry of the loop.
566   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
567 
568   /// The new Induction variable which was added to the new block.
569   PHINode *Induction;
570   /// The induction variable of the old basic block.
571   PHINode *OldInduction;
572   /// Maps scalars to widened vectors.
573   ValueMap WidenMap;
574   /// Store instructions that should be predicated, as a pair
575   ///   <StoreInst, Predicate>
576   SmallVector<std::pair<StoreInst *, Value *>, 4> PredicatedStores;
577   EdgeMaskCache MaskCache;
578   /// Trip count of the original loop.
579   Value *TripCount;
580   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
581   Value *VectorTripCount;
582 
583   /// Map of scalar integer values to the smallest bitwidth they can be legally
584   /// represented as. The vector equivalents of these values should be truncated
585   /// to this type.
586   MapVector<Instruction *, uint64_t> MinBWs;
587   LoopVectorizationLegality *Legal;
588 
589   // Record whether runtime check is added.
590   bool AddedSafetyChecks;
591 };
592 
593 class InnerLoopUnroller : public InnerLoopVectorizer {
594 public:
595   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
596                     LoopInfo *LI, DominatorTree *DT,
597                     const TargetLibraryInfo *TLI,
598                     const TargetTransformInfo *TTI, AssumptionCache *AC,
599                     unsigned UnrollFactor)
600       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, 1,
601                             UnrollFactor) {}
602 
603 private:
604   void scalarizeInstruction(Instruction *Instr,
605                             bool IfPredicateStore = false) override;
606   void vectorizeMemoryInstruction(Instruction *Instr) override;
607   Value *getBroadcastInstrs(Value *V) override;
608   Value *getStepVector(Value *Val, int StartIdx, Value *Step) override;
609   Value *getStepVector(Value *Val, int StartIdx, const SCEV *StepSCEV) override;
610   Value *reverseVector(Value *Vec) override;
611 };
612 
613 /// \brief Look for a meaningful debug location on the instruction or it's
614 /// operands.
615 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
616   if (!I)
617     return I;
618 
619   DebugLoc Empty;
620   if (I->getDebugLoc() != Empty)
621     return I;
622 
623   for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
624     if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
625       if (OpInst->getDebugLoc() != Empty)
626         return OpInst;
627   }
628 
629   return I;
630 }
631 
632 /// \brief Set the debug location in the builder using the debug location in the
633 /// instruction.
634 static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
635   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr))
636     B.SetCurrentDebugLocation(Inst->getDebugLoc());
637   else
638     B.SetCurrentDebugLocation(DebugLoc());
639 }
640 
641 #ifndef NDEBUG
642 /// \return string containing a file name and a line # for the given loop.
643 static std::string getDebugLocString(const Loop *L) {
644   std::string Result;
645   if (L) {
646     raw_string_ostream OS(Result);
647     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
648       LoopDbgLoc.print(OS);
649     else
650       // Just print the module name.
651       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
652     OS.flush();
653   }
654   return Result;
655 }
656 #endif
657 
658 /// \brief Propagate known metadata from one instruction to another.
659 static void propagateMetadata(Instruction *To, const Instruction *From) {
660   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
661   From->getAllMetadataOtherThanDebugLoc(Metadata);
662 
663   for (auto M : Metadata) {
664     unsigned Kind = M.first;
665 
666     // These are safe to transfer (this is safe for TBAA, even when we
667     // if-convert, because should that metadata have had a control dependency
668     // on the condition, and thus actually aliased with some other
669     // non-speculated memory access when the condition was false, this would be
670     // caught by the runtime overlap checks).
671     if (Kind != LLVMContext::MD_tbaa && Kind != LLVMContext::MD_alias_scope &&
672         Kind != LLVMContext::MD_noalias && Kind != LLVMContext::MD_fpmath &&
673         Kind != LLVMContext::MD_nontemporal)
674       continue;
675 
676     To->setMetadata(Kind, M.second);
677   }
678 }
679 
680 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
681                                          const Instruction *Orig) {
682   // If the loop was versioned with memchecks, add the corresponding no-alias
683   // metadata.
684   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
685     LVer->annotateInstWithNoAlias(To, Orig);
686 }
687 
688 void InnerLoopVectorizer::addMetadata(Instruction *To,
689                                       const Instruction *From) {
690   propagateMetadata(To, From);
691   addNewMetadata(To, From);
692 }
693 
694 void InnerLoopVectorizer::addMetadata(SmallVectorImpl<Value *> &To,
695                                       const Instruction *From) {
696   for (Value *V : To)
697     if (Instruction *I = dyn_cast<Instruction>(V))
698       addMetadata(I, From);
699 }
700 
701 /// \brief The group of interleaved loads/stores sharing the same stride and
702 /// close to each other.
703 ///
704 /// Each member in this group has an index starting from 0, and the largest
705 /// index should be less than interleaved factor, which is equal to the absolute
706 /// value of the access's stride.
707 ///
708 /// E.g. An interleaved load group of factor 4:
709 ///        for (unsigned i = 0; i < 1024; i+=4) {
710 ///          a = A[i];                           // Member of index 0
711 ///          b = A[i+1];                         // Member of index 1
712 ///          d = A[i+3];                         // Member of index 3
713 ///          ...
714 ///        }
715 ///
716 ///      An interleaved store group of factor 4:
717 ///        for (unsigned i = 0; i < 1024; i+=4) {
718 ///          ...
719 ///          A[i]   = a;                         // Member of index 0
720 ///          A[i+1] = b;                         // Member of index 1
721 ///          A[i+2] = c;                         // Member of index 2
722 ///          A[i+3] = d;                         // Member of index 3
723 ///        }
724 ///
725 /// Note: the interleaved load group could have gaps (missing members), but
726 /// the interleaved store group doesn't allow gaps.
727 class InterleaveGroup {
728 public:
729   InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
730       : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
731     assert(Align && "The alignment should be non-zero");
732 
733     Factor = std::abs(Stride);
734     assert(Factor > 1 && "Invalid interleave factor");
735 
736     Reverse = Stride < 0;
737     Members[0] = Instr;
738   }
739 
740   bool isReverse() const { return Reverse; }
741   unsigned getFactor() const { return Factor; }
742   unsigned getAlignment() const { return Align; }
743   unsigned getNumMembers() const { return Members.size(); }
744 
745   /// \brief Try to insert a new member \p Instr with index \p Index and
746   /// alignment \p NewAlign. The index is related to the leader and it could be
747   /// negative if it is the new leader.
748   ///
749   /// \returns false if the instruction doesn't belong to the group.
750   bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
751     assert(NewAlign && "The new member's alignment should be non-zero");
752 
753     int Key = Index + SmallestKey;
754 
755     // Skip if there is already a member with the same index.
756     if (Members.count(Key))
757       return false;
758 
759     if (Key > LargestKey) {
760       // The largest index is always less than the interleave factor.
761       if (Index >= static_cast<int>(Factor))
762         return false;
763 
764       LargestKey = Key;
765     } else if (Key < SmallestKey) {
766       // The largest index is always less than the interleave factor.
767       if (LargestKey - Key >= static_cast<int>(Factor))
768         return false;
769 
770       SmallestKey = Key;
771     }
772 
773     // It's always safe to select the minimum alignment.
774     Align = std::min(Align, NewAlign);
775     Members[Key] = Instr;
776     return true;
777   }
778 
779   /// \brief Get the member with the given index \p Index
780   ///
781   /// \returns nullptr if contains no such member.
782   Instruction *getMember(unsigned Index) const {
783     int Key = SmallestKey + Index;
784     if (!Members.count(Key))
785       return nullptr;
786 
787     return Members.find(Key)->second;
788   }
789 
790   /// \brief Get the index for the given member. Unlike the key in the member
791   /// map, the index starts from 0.
792   unsigned getIndex(Instruction *Instr) const {
793     for (auto I : Members)
794       if (I.second == Instr)
795         return I.first - SmallestKey;
796 
797     llvm_unreachable("InterleaveGroup contains no such member");
798   }
799 
800   Instruction *getInsertPos() const { return InsertPos; }
801   void setInsertPos(Instruction *Inst) { InsertPos = Inst; }
802 
803 private:
804   unsigned Factor; // Interleave Factor.
805   bool Reverse;
806   unsigned Align;
807   DenseMap<int, Instruction *> Members;
808   int SmallestKey;
809   int LargestKey;
810 
811   // To avoid breaking dependences, vectorized instructions of an interleave
812   // group should be inserted at either the first load or the last store in
813   // program order.
814   //
815   // E.g. %even = load i32             // Insert Position
816   //      %add = add i32 %even         // Use of %even
817   //      %odd = load i32
818   //
819   //      store i32 %even
820   //      %odd = add i32               // Def of %odd
821   //      store i32 %odd               // Insert Position
822   Instruction *InsertPos;
823 };
824 
825 /// \brief Drive the analysis of interleaved memory accesses in the loop.
826 ///
827 /// Use this class to analyze interleaved accesses only when we can vectorize
828 /// a loop. Otherwise it's meaningless to do analysis as the vectorization
829 /// on interleaved accesses is unsafe.
830 ///
831 /// The analysis collects interleave groups and records the relationships
832 /// between the member and the group in a map.
833 class InterleavedAccessInfo {
834 public:
835   InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
836                         DominatorTree *DT)
837       : PSE(PSE), TheLoop(L), DT(DT), RequiresScalarEpilogue(false) {}
838 
839   ~InterleavedAccessInfo() {
840     SmallSet<InterleaveGroup *, 4> DelSet;
841     // Avoid releasing a pointer twice.
842     for (auto &I : InterleaveGroupMap)
843       DelSet.insert(I.second);
844     for (auto *Ptr : DelSet)
845       delete Ptr;
846   }
847 
848   /// \brief Analyze the interleaved accesses and collect them in interleave
849   /// groups. Substitute symbolic strides using \p Strides.
850   void analyzeInterleaving(const ValueToValueMap &Strides);
851 
852   /// \brief Check if \p Instr belongs to any interleave group.
853   bool isInterleaved(Instruction *Instr) const {
854     return InterleaveGroupMap.count(Instr);
855   }
856 
857   /// \brief Return the maximum interleave factor of all interleaved groups.
858   unsigned getMaxInterleaveFactor() const {
859     unsigned MaxFactor = 1;
860     for (auto &Entry : InterleaveGroupMap)
861       MaxFactor = std::max(MaxFactor, Entry.second->getFactor());
862     return MaxFactor;
863   }
864 
865   /// \brief Get the interleave group that \p Instr belongs to.
866   ///
867   /// \returns nullptr if doesn't have such group.
868   InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
869     if (InterleaveGroupMap.count(Instr))
870       return InterleaveGroupMap.find(Instr)->second;
871     return nullptr;
872   }
873 
874   /// \brief Returns true if an interleaved group that may access memory
875   /// out-of-bounds requires a scalar epilogue iteration for correctness.
876   bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }
877 
878 private:
879   /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
880   /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
881   /// The interleaved access analysis can also add new predicates (for example
882   /// by versioning strides of pointers).
883   PredicatedScalarEvolution &PSE;
884   Loop *TheLoop;
885   DominatorTree *DT;
886 
887   /// True if the loop may contain non-reversed interleaved groups with
888   /// out-of-bounds accesses. We ensure we don't speculatively access memory
889   /// out-of-bounds by executing at least one scalar epilogue iteration.
890   bool RequiresScalarEpilogue;
891 
892   /// Holds the relationships between the members and the interleave group.
893   DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;
894 
895   /// \brief The descriptor for a strided memory access.
896   struct StrideDescriptor {
897     StrideDescriptor(int Stride, const SCEV *Scev, unsigned Size,
898                      unsigned Align)
899         : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}
900 
901     StrideDescriptor() : Stride(0), Scev(nullptr), Size(0), Align(0) {}
902 
903     int Stride; // The access's stride. It is negative for a reverse access.
904     const SCEV *Scev; // The scalar expression of this access
905     unsigned Size;    // The size of the memory object.
906     unsigned Align;   // The alignment of this access.
907   };
908 
909   /// \brief Create a new interleave group with the given instruction \p Instr,
910   /// stride \p Stride and alignment \p Align.
911   ///
912   /// \returns the newly created interleave group.
913   InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
914                                          unsigned Align) {
915     assert(!InterleaveGroupMap.count(Instr) &&
916            "Already in an interleaved access group");
917     InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
918     return InterleaveGroupMap[Instr];
919   }
920 
921   /// \brief Release the group and remove all the relationships.
922   void releaseGroup(InterleaveGroup *Group) {
923     for (unsigned i = 0; i < Group->getFactor(); i++)
924       if (Instruction *Member = Group->getMember(i))
925         InterleaveGroupMap.erase(Member);
926 
927     delete Group;
928   }
929 
930   /// \brief Collect all the accesses with a constant stride in program order.
931   void collectConstStridedAccesses(
932       MapVector<Instruction *, StrideDescriptor> &StrideAccesses,
933       const ValueToValueMap &Strides);
934 };
935 
936 /// Utility class for getting and setting loop vectorizer hints in the form
937 /// of loop metadata.
938 /// This class keeps a number of loop annotations locally (as member variables)
939 /// and can, upon request, write them back as metadata on the loop. It will
940 /// initially scan the loop for existing metadata, and will update the local
941 /// values based on information in the loop.
942 /// We cannot write all values to metadata, as the mere presence of some info,
943 /// for example 'force', means a decision has been made. So, we need to be
944 /// careful NOT to add them if the user hasn't specifically asked so.
945 class LoopVectorizeHints {
946   enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE };
947 
948   /// Hint - associates name and validation with the hint value.
949   struct Hint {
950     const char *Name;
951     unsigned Value; // This may have to change for non-numeric values.
952     HintKind Kind;
953 
954     Hint(const char *Name, unsigned Value, HintKind Kind)
955         : Name(Name), Value(Value), Kind(Kind) {}
956 
957     bool validate(unsigned Val) {
958       switch (Kind) {
959       case HK_WIDTH:
960         return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
961       case HK_UNROLL:
962         return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
963       case HK_FORCE:
964         return (Val <= 1);
965       }
966       return false;
967     }
968   };
969 
970   /// Vectorization width.
971   Hint Width;
972   /// Vectorization interleave factor.
973   Hint Interleave;
974   /// Vectorization forced
975   Hint Force;
976 
977   /// Return the loop metadata prefix.
978   static StringRef Prefix() { return "llvm.loop."; }
979 
980   /// True if there is any unsafe math in the loop.
981   bool PotentiallyUnsafe;
982 
983 public:
984   enum ForceKind {
985     FK_Undefined = -1, ///< Not selected.
986     FK_Disabled = 0,   ///< Forcing disabled.
987     FK_Enabled = 1,    ///< Forcing enabled.
988   };
989 
990   LoopVectorizeHints(const Loop *L, bool DisableInterleaving)
991       : Width("vectorize.width", VectorizerParams::VectorizationFactor,
992               HK_WIDTH),
993         Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
994         Force("vectorize.enable", FK_Undefined, HK_FORCE),
995         PotentiallyUnsafe(false), TheLoop(L) {
996     // Populate values with existing loop metadata.
997     getHintsFromMetadata();
998 
999     // force-vector-interleave overrides DisableInterleaving.
1000     if (VectorizerParams::isInterleaveForced())
1001       Interleave.Value = VectorizerParams::VectorizationInterleave;
1002 
1003     DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
1004           << "LV: Interleaving disabled by the pass manager\n");
1005   }
1006 
1007   /// Mark the loop L as already vectorized by setting the width to 1.
1008   void setAlreadyVectorized() {
1009     Width.Value = Interleave.Value = 1;
1010     Hint Hints[] = {Width, Interleave};
1011     writeHintsToMetadata(Hints);
1012   }
1013 
1014   bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
1015     if (getForce() == LoopVectorizeHints::FK_Disabled) {
1016       DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
1017       emitOptimizationRemarkAnalysis(F->getContext(),
1018                                      vectorizeAnalysisPassName(), *F,
1019                                      L->getStartLoc(), emitRemark());
1020       return false;
1021     }
1022 
1023     if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
1024       DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
1025       emitOptimizationRemarkAnalysis(F->getContext(),
1026                                      vectorizeAnalysisPassName(), *F,
1027                                      L->getStartLoc(), emitRemark());
1028       return false;
1029     }
1030 
1031     if (getWidth() == 1 && getInterleave() == 1) {
1032       // FIXME: Add a separate metadata to indicate when the loop has already
1033       // been vectorized instead of setting width and count to 1.
1034       DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
1035       // FIXME: Add interleave.disable metadata. This will allow
1036       // vectorize.disable to be used without disabling the pass and errors
1037       // to differentiate between disabled vectorization and a width of 1.
1038       emitOptimizationRemarkAnalysis(
1039           F->getContext(), vectorizeAnalysisPassName(), *F, L->getStartLoc(),
1040           "loop not vectorized: vectorization and interleaving are explicitly "
1041           "disabled, or vectorize width and interleave count are both set to "
1042           "1");
1043       return false;
1044     }
1045 
1046     return true;
1047   }
1048 
1049   /// Dumps all the hint information.
1050   std::string emitRemark() const {
1051     VectorizationReport R;
1052     if (Force.Value == LoopVectorizeHints::FK_Disabled)
1053       R << "vectorization is explicitly disabled";
1054     else {
1055       R << "use -Rpass-analysis=loop-vectorize for more info";
1056       if (Force.Value == LoopVectorizeHints::FK_Enabled) {
1057         R << " (Force=true";
1058         if (Width.Value != 0)
1059           R << ", Vector Width=" << Width.Value;
1060         if (Interleave.Value != 0)
1061           R << ", Interleave Count=" << Interleave.Value;
1062         R << ")";
1063       }
1064     }
1065 
1066     return R.str();
1067   }
1068 
1069   unsigned getWidth() const { return Width.Value; }
1070   unsigned getInterleave() const { return Interleave.Value; }
1071   enum ForceKind getForce() const { return (ForceKind)Force.Value; }
1072   const char *vectorizeAnalysisPassName() const {
1073     // If hints are provided that don't disable vectorization use the
1074     // AlwaysPrint pass name to force the frontend to print the diagnostic.
1075     if (getWidth() == 1)
1076       return LV_NAME;
1077     if (getForce() == LoopVectorizeHints::FK_Disabled)
1078       return LV_NAME;
1079     if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
1080       return LV_NAME;
1081     return DiagnosticInfo::AlwaysPrint;
1082   }
1083 
1084   bool allowReordering() const {
1085     // When enabling loop hints are provided we allow the vectorizer to change
1086     // the order of operations that is given by the scalar loop. This is not
1087     // enabled by default because can be unsafe or inefficient. For example,
1088     // reordering floating-point operations will change the way round-off
1089     // error accumulates in the loop.
1090     return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
1091   }
1092 
1093   bool isPotentiallyUnsafe() const {
1094     // Avoid FP vectorization if the target is unsure about proper support.
1095     // This may be related to the SIMD unit in the target not handling
1096     // IEEE 754 FP ops properly, or bad single-to-double promotions.
1097     // Otherwise, a sequence of vectorized loops, even without reduction,
1098     // could lead to different end results on the destination vectors.
1099     return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
1100   }
1101 
1102   void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }
1103 
1104 private:
1105   /// Find hints specified in the loop metadata and update local values.
1106   void getHintsFromMetadata() {
1107     MDNode *LoopID = TheLoop->getLoopID();
1108     if (!LoopID)
1109       return;
1110 
1111     // First operand should refer to the loop id itself.
1112     assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
1113     assert(LoopID->getOperand(0) == LoopID && "invalid loop id");
1114 
1115     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
1116       const MDString *S = nullptr;
1117       SmallVector<Metadata *, 4> Args;
1118 
1119       // The expected hint is either a MDString or a MDNode with the first
1120       // operand a MDString.
1121       if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
1122         if (!MD || MD->getNumOperands() == 0)
1123           continue;
1124         S = dyn_cast<MDString>(MD->getOperand(0));
1125         for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
1126           Args.push_back(MD->getOperand(i));
1127       } else {
1128         S = dyn_cast<MDString>(LoopID->getOperand(i));
1129         assert(Args.size() == 0 && "too many arguments for MDString");
1130       }
1131 
1132       if (!S)
1133         continue;
1134 
1135       // Check if the hint starts with the loop metadata prefix.
1136       StringRef Name = S->getString();
1137       if (Args.size() == 1)
1138         setHint(Name, Args[0]);
1139     }
1140   }
1141 
1142   /// Checks string hint with one operand and set value if valid.
1143   void setHint(StringRef Name, Metadata *Arg) {
1144     if (!Name.startswith(Prefix()))
1145       return;
1146     Name = Name.substr(Prefix().size(), StringRef::npos);
1147 
1148     const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
1149     if (!C)
1150       return;
1151     unsigned Val = C->getZExtValue();
1152 
1153     Hint *Hints[] = {&Width, &Interleave, &Force};
1154     for (auto H : Hints) {
1155       if (Name == H->Name) {
1156         if (H->validate(Val))
1157           H->Value = Val;
1158         else
1159           DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
1160         break;
1161       }
1162     }
1163   }
1164 
1165   /// Create a new hint from name / value pair.
1166   MDNode *createHintMetadata(StringRef Name, unsigned V) const {
1167     LLVMContext &Context = TheLoop->getHeader()->getContext();
1168     Metadata *MDs[] = {MDString::get(Context, Name),
1169                        ConstantAsMetadata::get(
1170                            ConstantInt::get(Type::getInt32Ty(Context), V))};
1171     return MDNode::get(Context, MDs);
1172   }
1173 
1174   /// Matches metadata with hint name.
1175   bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
1176     MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
1177     if (!Name)
1178       return false;
1179 
1180     for (auto H : HintTypes)
1181       if (Name->getString().endswith(H.Name))
1182         return true;
1183     return false;
1184   }
1185 
1186   /// Sets current hints into loop metadata, keeping other values intact.
1187   void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
1188     if (HintTypes.size() == 0)
1189       return;
1190 
1191     // Reserve the first element to LoopID (see below).
1192     SmallVector<Metadata *, 4> MDs(1);
1193     // If the loop already has metadata, then ignore the existing operands.
1194     MDNode *LoopID = TheLoop->getLoopID();
1195     if (LoopID) {
1196       for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
1197         MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
1198         // If node in update list, ignore old value.
1199         if (!matchesHintMetadataName(Node, HintTypes))
1200           MDs.push_back(Node);
1201       }
1202     }
1203 
1204     // Now, add the missing hints.
1205     for (auto H : HintTypes)
1206       MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));
1207 
1208     // Replace current metadata node with new one.
1209     LLVMContext &Context = TheLoop->getHeader()->getContext();
1210     MDNode *NewLoopID = MDNode::get(Context, MDs);
1211     // Set operand 0 to refer to the loop id itself.
1212     NewLoopID->replaceOperandWith(0, NewLoopID);
1213 
1214     TheLoop->setLoopID(NewLoopID);
1215   }
1216 
1217   /// The loop these hints belong to.
1218   const Loop *TheLoop;
1219 };
1220 
1221 static void emitAnalysisDiag(const Function *TheFunction, const Loop *TheLoop,
1222                              const LoopVectorizeHints &Hints,
1223                              const LoopAccessReport &Message) {
1224   const char *Name = Hints.vectorizeAnalysisPassName();
1225   LoopAccessReport::emitAnalysis(Message, TheFunction, TheLoop, Name);
1226 }
1227 
1228 static void emitMissedWarning(Function *F, Loop *L,
1229                               const LoopVectorizeHints &LH) {
1230   emitOptimizationRemarkMissed(F->getContext(), LV_NAME, *F, L->getStartLoc(),
1231                                LH.emitRemark());
1232 
1233   if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
1234     if (LH.getWidth() != 1)
1235       emitLoopVectorizeWarning(
1236           F->getContext(), *F, L->getStartLoc(),
1237           "failed explicitly specified loop vectorization");
1238     else if (LH.getInterleave() != 1)
1239       emitLoopInterleaveWarning(
1240           F->getContext(), *F, L->getStartLoc(),
1241           "failed explicitly specified loop interleaving");
1242   }
1243 }
1244 
1245 /// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
1246 /// to what vectorization factor.
1247 /// This class does not look at the profitability of vectorization, only the
1248 /// legality. This class has two main kinds of checks:
1249 /// * Memory checks - The code in canVectorizeMemory checks if vectorization
1250 ///   will change the order of memory accesses in a way that will change the
1251 ///   correctness of the program.
1252 /// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory
1253 /// checks for a number of different conditions, such as the availability of a
1254 /// single induction variable, that all types are supported and vectorize-able,
1255 /// etc. This code reflects the capabilities of InnerLoopVectorizer.
1256 /// This class is also used by InnerLoopVectorizer for identifying
1257 /// induction variable and the different reduction variables.
1258 class LoopVectorizationLegality {
1259 public:
1260   LoopVectorizationLegality(Loop *L, PredicatedScalarEvolution &PSE,
1261                             DominatorTree *DT, TargetLibraryInfo *TLI,
1262                             AliasAnalysis *AA, Function *F,
1263                             const TargetTransformInfo *TTI,
1264                             LoopAccessAnalysis *LAA,
1265                             LoopVectorizationRequirements *R,
1266                             LoopVectorizeHints *H)
1267       : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TheFunction(F),
1268         TTI(TTI), DT(DT), LAA(LAA), LAI(nullptr), InterleaveInfo(PSE, L, DT),
1269         Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
1270         Requirements(R), Hints(H) {}
1271 
1272   /// ReductionList contains the reduction descriptors for all
1273   /// of the reductions that were found in the loop.
1274   typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;
1275 
1276   /// InductionList saves induction variables and maps them to the
1277   /// induction descriptor.
1278   typedef MapVector<PHINode *, InductionDescriptor> InductionList;
1279 
1280   /// RecurrenceSet contains the phi nodes that are recurrences other than
1281   /// inductions and reductions.
1282   typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet;
1283 
1284   /// Returns true if it is legal to vectorize this loop.
1285   /// This does not mean that it is profitable to vectorize this
1286   /// loop, only that it is legal to do so.
1287   bool canVectorize();
1288 
1289   /// Returns the Induction variable.
1290   PHINode *getInduction() { return Induction; }
1291 
1292   /// Returns the reduction variables found in the loop.
1293   ReductionList *getReductionVars() { return &Reductions; }
1294 
1295   /// Returns the induction variables found in the loop.
1296   InductionList *getInductionVars() { return &Inductions; }
1297 
1298   /// Return the first-order recurrences found in the loop.
1299   RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }
1300 
1301   /// Returns the widest induction type.
1302   Type *getWidestInductionType() { return WidestIndTy; }
1303 
1304   /// Returns True if V is an induction variable in this loop.
1305   bool isInductionVariable(const Value *V);
1306 
1307   /// Returns True if PN is a reduction variable in this loop.
1308   bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }
1309 
1310   /// Returns True if Phi is a first-order recurrence in this loop.
1311   bool isFirstOrderRecurrence(const PHINode *Phi);
1312 
1313   /// Return true if the block BB needs to be predicated in order for the loop
1314   /// to be vectorized.
1315   bool blockNeedsPredication(BasicBlock *BB);
1316 
1317   /// Check if this  pointer is consecutive when vectorizing. This happens
1318   /// when the last index of the GEP is the induction variable, or that the
1319   /// pointer itself is an induction variable.
1320   /// This check allows us to vectorize A[idx] into a wide load/store.
1321   /// Returns:
1322   /// 0 - Stride is unknown or non-consecutive.
1323   /// 1 - Address is consecutive.
1324   /// -1 - Address is consecutive, and decreasing.
1325   int isConsecutivePtr(Value *Ptr);
1326 
1327   /// Returns true if the value V is uniform within the loop.
1328   bool isUniform(Value *V);
1329 
1330   /// Returns true if this instruction will remain scalar after vectorization.
1331   bool isUniformAfterVectorization(Instruction *I) { return Uniforms.count(I); }
1332 
1333   /// Returns the information that we collected about runtime memory check.
1334   const RuntimePointerChecking *getRuntimePointerChecking() const {
1335     return LAI->getRuntimePointerChecking();
1336   }
1337 
1338   const LoopAccessInfo *getLAI() const { return LAI; }
1339 
1340   /// \brief Check if \p Instr belongs to any interleaved access group.
1341   bool isAccessInterleaved(Instruction *Instr) {
1342     return InterleaveInfo.isInterleaved(Instr);
1343   }
1344 
1345   /// \brief Return the maximum interleave factor of all interleaved groups.
1346   unsigned getMaxInterleaveFactor() const {
1347     return InterleaveInfo.getMaxInterleaveFactor();
1348   }
1349 
1350   /// \brief Get the interleaved access group that \p Instr belongs to.
1351   const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
1352     return InterleaveInfo.getInterleaveGroup(Instr);
1353   }
1354 
1355   /// \brief Returns true if an interleaved group requires a scalar iteration
1356   /// to handle accesses with gaps.
1357   bool requiresScalarEpilogue() const {
1358     return InterleaveInfo.requiresScalarEpilogue();
1359   }
1360 
1361   unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }
1362 
1363   bool hasStride(Value *V) { return StrideSet.count(V); }
1364   bool mustCheckStrides() { return !StrideSet.empty(); }
1365   SmallPtrSet<Value *, 8>::iterator strides_begin() {
1366     return StrideSet.begin();
1367   }
1368   SmallPtrSet<Value *, 8>::iterator strides_end() { return StrideSet.end(); }
1369 
1370   /// Returns true if the target machine supports masked store operation
1371   /// for the given \p DataType and kind of access to \p Ptr.
1372   bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
1373     return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
1374   }
1375   /// Returns true if the target machine supports masked load operation
1376   /// for the given \p DataType and kind of access to \p Ptr.
1377   bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
1378     return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
1379   }
1380   /// Returns true if the target machine supports masked scatter operation
1381   /// for the given \p DataType.
1382   bool isLegalMaskedScatter(Type *DataType) {
1383     return TTI->isLegalMaskedScatter(DataType);
1384   }
1385   /// Returns true if the target machine supports masked gather operation
1386   /// for the given \p DataType.
1387   bool isLegalMaskedGather(Type *DataType) {
1388     return TTI->isLegalMaskedGather(DataType);
1389   }
1390 
1391   /// Returns true if vector representation of the instruction \p I
1392   /// requires mask.
1393   bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
1394   unsigned getNumStores() const { return LAI->getNumStores(); }
1395   unsigned getNumLoads() const { return LAI->getNumLoads(); }
1396   unsigned getNumPredStores() const { return NumPredStores; }
1397 
1398 private:
1399   /// Check if a single basic block loop is vectorizable.
1400   /// At this point we know that this is a loop with a constant trip count
1401   /// and we only need to check individual instructions.
1402   bool canVectorizeInstrs();
1403 
1404   /// When we vectorize loops we may change the order in which
1405   /// we read and write from memory. This method checks if it is
1406   /// legal to vectorize the code, considering only memory constrains.
1407   /// Returns true if the loop is vectorizable
1408   bool canVectorizeMemory();
1409 
1410   /// Return true if we can vectorize this loop using the IF-conversion
1411   /// transformation.
1412   bool canVectorizeWithIfConvert();
1413 
1414   /// Collect the variables that need to stay uniform after vectorization.
1415   void collectLoopUniforms();
1416 
1417   /// Return true if all of the instructions in the block can be speculatively
1418   /// executed. \p SafePtrs is a list of addresses that are known to be legal
1419   /// and we know that we can read from them without segfault.
1420   bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);
1421 
1422   /// \brief Collect memory access with loop invariant strides.
1423   ///
1424   /// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
1425   /// invariant.
1426   void collectStridedAccess(Value *LoadOrStoreInst);
1427 
1428   /// \brief Returns true if we can vectorize using this PHI node as an
1429   /// induction.
1430   ///
1431   /// Updates the vectorization state by adding \p Phi to the inductions list.
1432   /// This can set \p Phi as the main induction of the loop if \p Phi is a
1433   /// better choice for the main induction than the existing one.
1434   bool addInductionPhi(PHINode *Phi, InductionDescriptor ID);
1435 
1436   /// Report an analysis message to assist the user in diagnosing loops that are
1437   /// not vectorized.  These are handled as LoopAccessReport rather than
1438   /// VectorizationReport because the << operator of VectorizationReport returns
1439   /// LoopAccessReport.
1440   void emitAnalysis(const LoopAccessReport &Message) const {
1441     emitAnalysisDiag(TheFunction, TheLoop, *Hints, Message);
1442   }
1443 
1444   unsigned NumPredStores;
1445 
1446   /// The loop that we evaluate.
1447   Loop *TheLoop;
1448   /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
1449   /// Applies dynamic knowledge to simplify SCEV expressions in the context
1450   /// of existing SCEV assumptions. The analysis will also add a minimal set
1451   /// of new predicates if this is required to enable vectorization and
1452   /// unrolling.
1453   PredicatedScalarEvolution &PSE;
1454   /// Target Library Info.
1455   TargetLibraryInfo *TLI;
1456   /// Parent function
1457   Function *TheFunction;
1458   /// Target Transform Info
1459   const TargetTransformInfo *TTI;
1460   /// Dominator Tree.
1461   DominatorTree *DT;
1462   // LoopAccess analysis.
1463   LoopAccessAnalysis *LAA;
1464   // And the loop-accesses info corresponding to this loop.  This pointer is
1465   // null until canVectorizeMemory sets it up.
1466   const LoopAccessInfo *LAI;
1467 
1468   /// The interleave access information contains groups of interleaved accesses
1469   /// with the same stride and close to each other.
1470   InterleavedAccessInfo InterleaveInfo;
1471 
1472   //  ---  vectorization state --- //
1473 
1474   /// Holds the integer induction variable. This is the counter of the
1475   /// loop.
1476   PHINode *Induction;
1477   /// Holds the reduction variables.
1478   ReductionList Reductions;
1479   /// Holds all of the induction variables that we found in the loop.
1480   /// Notice that inductions don't need to start at zero and that induction
1481   /// variables can be pointers.
1482   InductionList Inductions;
1483   /// Holds the phi nodes that are first-order recurrences.
1484   RecurrenceSet FirstOrderRecurrences;
1485   /// Holds the widest induction type encountered.
1486   Type *WidestIndTy;
1487 
1488   /// Allowed outside users. This holds the reduction
1489   /// vars which can be accessed from outside the loop.
1490   SmallPtrSet<Value *, 4> AllowedExit;
1491   /// This set holds the variables which are known to be uniform after
1492   /// vectorization.
1493   SmallPtrSet<Instruction *, 4> Uniforms;
1494 
1495   /// Can we assume the absence of NaNs.
1496   bool HasFunNoNaNAttr;
1497 
1498   /// Vectorization requirements that will go through late-evaluation.
1499   LoopVectorizationRequirements *Requirements;
1500 
1501   /// Used to emit an analysis of any legality issues.
1502   LoopVectorizeHints *Hints;
1503 
1504   ValueToValueMap Strides;
1505   SmallPtrSet<Value *, 8> StrideSet;
1506 
1507   /// While vectorizing these instructions we have to generate a
1508   /// call to the appropriate masked intrinsic
1509   SmallPtrSet<const Instruction *, 8> MaskedOp;
1510 };
1511 
1512 /// LoopVectorizationCostModel - estimates the expected speedups due to
1513 /// vectorization.
1514 /// In many cases vectorization is not profitable. This can happen because of
1515 /// a number of reasons. In this class we mainly attempt to predict the
1516 /// expected speedup/slowdowns due to the supported instruction set. We use the
1517 /// TargetTransformInfo to query the different backends for the cost of
1518 /// different operations.
1519 class LoopVectorizationCostModel {
1520 public:
1521   LoopVectorizationCostModel(Loop *L, ScalarEvolution *SE, LoopInfo *LI,
1522                              LoopVectorizationLegality *Legal,
1523                              const TargetTransformInfo &TTI,
1524                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1525                              AssumptionCache *AC, const Function *F,
1526                              const LoopVectorizeHints *Hints,
1527                              SmallPtrSetImpl<const Value *> &ValuesToIgnore)
1528       : TheLoop(L), SE(SE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
1529         TheFunction(F), Hints(Hints), ValuesToIgnore(ValuesToIgnore) {}
1530 
1531   /// Information about vectorization costs
1532   struct VectorizationFactor {
1533     unsigned Width; // Vector width with best cost
1534     unsigned Cost;  // Cost of the loop with that width
1535   };
1536   /// \return The most profitable vectorization factor and the cost of that VF.
1537   /// This method checks every power of two up to VF. If UserVF is not ZERO
1538   /// then this vectorization factor will be selected if vectorization is
1539   /// possible.
1540   VectorizationFactor selectVectorizationFactor(bool OptForSize);
1541 
1542   /// \return The size (in bits) of the smallest and widest types in the code
1543   /// that needs to be vectorized. We ignore values that remain scalar such as
1544   /// 64 bit loop indices.
1545   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1546 
1547   /// \return The desired interleave count.
1548   /// If interleave count has been specified by metadata it will be returned.
1549   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1550   /// are the selected vectorization factor and the cost of the selected VF.
1551   unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
1552                                  unsigned LoopCost);
1553 
1554   /// \return The most profitable unroll factor.
1555   /// This method finds the best unroll-factor based on register pressure and
1556   /// other parameters. VF and LoopCost are the selected vectorization factor
1557   /// and the cost of the selected VF.
1558   unsigned computeInterleaveCount(bool OptForSize, unsigned VF,
1559                                   unsigned LoopCost);
1560 
1561   /// \brief A struct that represents some properties of the register usage
1562   /// of a loop.
1563   struct RegisterUsage {
1564     /// Holds the number of loop invariant values that are used in the loop.
1565     unsigned LoopInvariantRegs;
1566     /// Holds the maximum number of concurrent live intervals in the loop.
1567     unsigned MaxLocalUsers;
1568     /// Holds the number of instructions in the loop.
1569     unsigned NumInstructions;
1570   };
1571 
1572   /// \return Returns information about the register usages of the loop for the
1573   /// given vectorization factors.
1574   SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1575 
1576 private:
1577   /// The vectorization cost is a combination of the cost itself and a boolean
1578   /// indicating whether any of the contributing operations will actually
1579   /// operate on
1580   /// vector values after type legalization in the backend. If this latter value
1581   /// is
1582   /// false, then all operations will be scalarized (i.e. no vectorization has
1583   /// actually taken place).
1584   typedef std::pair<unsigned, bool> VectorizationCostTy;
1585 
1586   /// Returns the expected execution cost. The unit of the cost does
1587   /// not matter because we use the 'cost' units to compare different
1588   /// vector widths. The cost that is returned is *not* normalized by
1589   /// the factor width.
1590   VectorizationCostTy expectedCost(unsigned VF);
1591 
1592   /// Returns the execution time cost of an instruction for a given vector
1593   /// width. Vector width of one means scalar.
1594   VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
1595 
1596   /// The cost-computation logic from getInstructionCost which provides
1597   /// the vector type as an output parameter.
1598   unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
1599 
1600   /// Returns whether the instruction is a load or store and will be a emitted
1601   /// as a vector operation.
1602   bool isConsecutiveLoadOrStore(Instruction *I);
1603 
1604   /// Report an analysis message to assist the user in diagnosing loops that are
1605   /// not vectorized.  These are handled as LoopAccessReport rather than
1606   /// VectorizationReport because the << operator of VectorizationReport returns
1607   /// LoopAccessReport.
1608   void emitAnalysis(const LoopAccessReport &Message) const {
1609     emitAnalysisDiag(TheFunction, TheLoop, *Hints, Message);
1610   }
1611 
1612 public:
1613   /// Map of scalar integer values to the smallest bitwidth they can be legally
1614   /// represented as. The vector equivalents of these values should be truncated
1615   /// to this type.
1616   MapVector<Instruction *, uint64_t> MinBWs;
1617 
1618   /// The loop that we evaluate.
1619   Loop *TheLoop;
1620   /// Scev analysis.
1621   ScalarEvolution *SE;
1622   /// Loop Info analysis.
1623   LoopInfo *LI;
1624   /// Vectorization legality.
1625   LoopVectorizationLegality *Legal;
1626   /// Vector target information.
1627   const TargetTransformInfo &TTI;
1628   /// Target Library Info.
1629   const TargetLibraryInfo *TLI;
1630   /// Demanded bits analysis
1631   DemandedBits *DB;
1632   const Function *TheFunction;
1633   // Loop Vectorize Hint.
1634   const LoopVectorizeHints *Hints;
1635   // Values to ignore in the cost model.
1636   const SmallPtrSetImpl<const Value *> &ValuesToIgnore;
1637 };
1638 
1639 /// \brief This holds vectorization requirements that must be verified late in
1640 /// the process. The requirements are set by legalize and costmodel. Once
1641 /// vectorization has been determined to be possible and profitable the
1642 /// requirements can be verified by looking for metadata or compiler options.
1643 /// For example, some loops require FP commutativity which is only allowed if
1644 /// vectorization is explicitly specified or if the fast-math compiler option
1645 /// has been provided.
1646 /// Late evaluation of these requirements allows helpful diagnostics to be
1647 /// composed that tells the user what need to be done to vectorize the loop. For
1648 /// example, by specifying #pragma clang loop vectorize or -ffast-math. Late
1649 /// evaluation should be used only when diagnostics can generated that can be
1650 /// followed by a non-expert user.
1651 class LoopVectorizationRequirements {
1652 public:
1653   LoopVectorizationRequirements()
1654       : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr) {}
1655 
1656   void addUnsafeAlgebraInst(Instruction *I) {
1657     // First unsafe algebra instruction.
1658     if (!UnsafeAlgebraInst)
1659       UnsafeAlgebraInst = I;
1660   }
1661 
1662   void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }
1663 
1664   bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
1665     const char *Name = Hints.vectorizeAnalysisPassName();
1666     bool Failed = false;
1667     if (UnsafeAlgebraInst && !Hints.allowReordering()) {
1668       emitOptimizationRemarkAnalysisFPCommute(
1669           F->getContext(), Name, *F, UnsafeAlgebraInst->getDebugLoc(),
1670           VectorizationReport() << "cannot prove it is safe to reorder "
1671                                    "floating-point operations");
1672       Failed = true;
1673     }
1674 
1675     // Test if runtime memcheck thresholds are exceeded.
1676     bool PragmaThresholdReached =
1677         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
1678     bool ThresholdReached =
1679         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
1680     if ((ThresholdReached && !Hints.allowReordering()) ||
1681         PragmaThresholdReached) {
1682       emitOptimizationRemarkAnalysisAliasing(
1683           F->getContext(), Name, *F, L->getStartLoc(),
1684           VectorizationReport()
1685               << "cannot prove it is safe to reorder memory operations");
1686       DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
1687       Failed = true;
1688     }
1689 
1690     return Failed;
1691   }
1692 
1693 private:
1694   unsigned NumRuntimePointerChecks;
1695   Instruction *UnsafeAlgebraInst;
1696 };
1697 
1698 static void addInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) {
1699   if (L.empty())
1700     return V.push_back(&L);
1701 
1702   for (Loop *InnerL : L)
1703     addInnerLoop(*InnerL, V);
1704 }
1705 
1706 /// The LoopVectorize Pass.
1707 struct LoopVectorize : public FunctionPass {
1708   /// Pass identification, replacement for typeid
1709   static char ID;
1710 
1711   explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true)
1712       : FunctionPass(ID), DisableUnrolling(NoUnrolling),
1713         AlwaysVectorize(AlwaysVectorize) {
1714     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
1715   }
1716 
1717   ScalarEvolution *SE;
1718   LoopInfo *LI;
1719   TargetTransformInfo *TTI;
1720   DominatorTree *DT;
1721   BlockFrequencyInfo *BFI;
1722   TargetLibraryInfo *TLI;
1723   DemandedBits *DB;
1724   AliasAnalysis *AA;
1725   AssumptionCache *AC;
1726   LoopAccessAnalysis *LAA;
1727   bool DisableUnrolling;
1728   bool AlwaysVectorize;
1729 
1730   BlockFrequency ColdEntryFreq;
1731 
1732   bool runOnFunction(Function &F) override {
1733     if (skipFunction(F))
1734       return false;
1735 
1736     SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1737     LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1738     TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1739     DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1740     BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
1741     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
1742     TLI = TLIP ? &TLIP->getTLI() : nullptr;
1743     AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1744     AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1745     LAA = &getAnalysis<LoopAccessAnalysis>();
1746     DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
1747 
1748     // Compute some weights outside of the loop over the loops. Compute this
1749     // using a BranchProbability to re-use its scaling math.
1750     const BranchProbability ColdProb(1, 5); // 20%
1751     ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb;
1752 
1753     // Don't attempt if
1754     // 1. the target claims to have no vector registers, and
1755     // 2. interleaving won't help ILP.
1756     //
1757     // The second condition is necessary because, even if the target has no
1758     // vector registers, loop vectorization may still enable scalar
1759     // interleaving.
1760     if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
1761       return false;
1762 
1763     // Build up a worklist of inner-loops to vectorize. This is necessary as
1764     // the act of vectorizing or partially unrolling a loop creates new loops
1765     // and can invalidate iterators across the loops.
1766     SmallVector<Loop *, 8> Worklist;
1767 
1768     for (Loop *L : *LI)
1769       addInnerLoop(*L, Worklist);
1770 
1771     LoopsAnalyzed += Worklist.size();
1772 
1773     // Now walk the identified inner loops.
1774     bool Changed = false;
1775     while (!Worklist.empty())
1776       Changed |= processLoop(Worklist.pop_back_val());
1777 
1778     // Process each loop nest in the function.
1779     return Changed;
1780   }
1781 
1782   static void AddRuntimeUnrollDisableMetaData(Loop *L) {
1783     SmallVector<Metadata *, 4> MDs;
1784     // Reserve first location for self reference to the LoopID metadata node.
1785     MDs.push_back(nullptr);
1786     bool IsUnrollMetadata = false;
1787     MDNode *LoopID = L->getLoopID();
1788     if (LoopID) {
1789       // First find existing loop unrolling disable metadata.
1790       for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
1791         MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
1792         if (MD) {
1793           const MDString *S = dyn_cast<MDString>(MD->getOperand(0));
1794           IsUnrollMetadata =
1795               S && S->getString().startswith("llvm.loop.unroll.disable");
1796         }
1797         MDs.push_back(LoopID->getOperand(i));
1798       }
1799     }
1800 
1801     if (!IsUnrollMetadata) {
1802       // Add runtime unroll disable metadata.
1803       LLVMContext &Context = L->getHeader()->getContext();
1804       SmallVector<Metadata *, 1> DisableOperands;
1805       DisableOperands.push_back(
1806           MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
1807       MDNode *DisableNode = MDNode::get(Context, DisableOperands);
1808       MDs.push_back(DisableNode);
1809       MDNode *NewLoopID = MDNode::get(Context, MDs);
1810       // Set operand 0 to refer to the loop id itself.
1811       NewLoopID->replaceOperandWith(0, NewLoopID);
1812       L->setLoopID(NewLoopID);
1813     }
1814   }
1815 
1816   bool processLoop(Loop *L) {
1817     assert(L->empty() && "Only process inner loops.");
1818 
1819 #ifndef NDEBUG
1820     const std::string DebugLocStr = getDebugLocString(L);
1821 #endif /* NDEBUG */
1822 
1823     DEBUG(dbgs() << "\nLV: Checking a loop in \""
1824                  << L->getHeader()->getParent()->getName() << "\" from "
1825                  << DebugLocStr << "\n");
1826 
1827     LoopVectorizeHints Hints(L, DisableUnrolling);
1828 
1829     DEBUG(dbgs() << "LV: Loop hints:"
1830                  << " force="
1831                  << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
1832                          ? "disabled"
1833                          : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
1834                                 ? "enabled"
1835                                 : "?"))
1836                  << " width=" << Hints.getWidth()
1837                  << " unroll=" << Hints.getInterleave() << "\n");
1838 
1839     // Function containing loop
1840     Function *F = L->getHeader()->getParent();
1841 
1842     // Looking at the diagnostic output is the only way to determine if a loop
1843     // was vectorized (other than looking at the IR or machine code), so it
1844     // is important to generate an optimization remark for each loop. Most of
1845     // these messages are generated by emitOptimizationRemarkAnalysis. Remarks
1846     // generated by emitOptimizationRemark and emitOptimizationRemarkMissed are
1847     // less verbose reporting vectorized loops and unvectorized loops that may
1848     // benefit from vectorization, respectively.
1849 
1850     if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
1851       DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
1852       return false;
1853     }
1854 
1855     // Check the loop for a trip count threshold:
1856     // do not vectorize loops with a tiny trip count.
1857     const unsigned TC = SE->getSmallConstantTripCount(L);
1858     if (TC > 0u && TC < TinyTripCountVectorThreshold) {
1859       DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
1860                    << "This loop is not worth vectorizing.");
1861       if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
1862         DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
1863       else {
1864         DEBUG(dbgs() << "\n");
1865         emitAnalysisDiag(F, L, Hints, VectorizationReport()
1866                                           << "vectorization is not beneficial "
1867                                              "and is not explicitly forced");
1868         return false;
1869       }
1870     }
1871 
1872     PredicatedScalarEvolution PSE(*SE, *L);
1873 
1874     // Check if it is legal to vectorize the loop.
1875     LoopVectorizationRequirements Requirements;
1876     LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, LAA,
1877                                   &Requirements, &Hints);
1878     if (!LVL.canVectorize()) {
1879       DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
1880       emitMissedWarning(F, L, Hints);
1881       return false;
1882     }
1883 
1884     // Collect values we want to ignore in the cost model. This includes
1885     // type-promoting instructions we identified during reduction detection.
1886     SmallPtrSet<const Value *, 32> ValuesToIgnore;
1887     CodeMetrics::collectEphemeralValues(L, AC, ValuesToIgnore);
1888     for (auto &Reduction : *LVL.getReductionVars()) {
1889       RecurrenceDescriptor &RedDes = Reduction.second;
1890       SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
1891       ValuesToIgnore.insert(Casts.begin(), Casts.end());
1892     }
1893 
1894     // Use the cost model.
1895     LoopVectorizationCostModel CM(L, PSE.getSE(), LI, &LVL, *TTI, TLI, DB, AC,
1896                                   F, &Hints, ValuesToIgnore);
1897 
1898     // Check the function attributes to find out if this function should be
1899     // optimized for size.
1900     bool OptForSize =
1901         Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
1902 
1903     // Compute the weighted frequency of this loop being executed and see if it
1904     // is less than 20% of the function entry baseline frequency. Note that we
1905     // always have a canonical loop here because we think we *can* vectorize.
1906     // FIXME: This is hidden behind a flag due to pervasive problems with
1907     // exactly what block frequency models.
1908     if (LoopVectorizeWithBlockFrequency) {
1909       BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader());
1910       if (Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
1911           LoopEntryFreq < ColdEntryFreq)
1912         OptForSize = true;
1913     }
1914 
1915     // Check the function attributes to see if implicit floats are allowed.
1916     // FIXME: This check doesn't seem possibly correct -- what if the loop is
1917     // an integer loop and the vector instructions selected are purely integer
1918     // vector instructions?
1919     if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
1920       DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
1921                       "attribute is used.\n");
1922       emitAnalysisDiag(
1923           F, L, Hints,
1924           VectorizationReport()
1925               << "loop not vectorized due to NoImplicitFloat attribute");
1926       emitMissedWarning(F, L, Hints);
1927       return false;
1928     }
1929 
1930     // Check if the target supports potentially unsafe FP vectorization.
1931     // FIXME: Add a check for the type of safety issue (denormal, signaling)
1932     // for the target we're vectorizing for, to make sure none of the
1933     // additional fp-math flags can help.
1934     if (Hints.isPotentiallyUnsafe() &&
1935         TTI->isFPVectorizationPotentiallyUnsafe()) {
1936       DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
1937       emitAnalysisDiag(F, L, Hints,
1938                        VectorizationReport()
1939                            << "loop not vectorized due to unsafe FP support.");
1940       emitMissedWarning(F, L, Hints);
1941       return false;
1942     }
1943 
1944     // Select the optimal vectorization factor.
1945     const LoopVectorizationCostModel::VectorizationFactor VF =
1946         CM.selectVectorizationFactor(OptForSize);
1947 
1948     // Select the interleave count.
1949     unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);
1950 
1951     // Get user interleave count.
1952     unsigned UserIC = Hints.getInterleave();
1953 
1954     // Identify the diagnostic messages that should be produced.
1955     std::string VecDiagMsg, IntDiagMsg;
1956     bool VectorizeLoop = true, InterleaveLoop = true;
1957 
1958     if (Requirements.doesNotMeet(F, L, Hints)) {
1959       DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
1960                       "requirements.\n");
1961       emitMissedWarning(F, L, Hints);
1962       return false;
1963     }
1964 
1965     if (VF.Width == 1) {
1966       DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
1967       VecDiagMsg =
1968           "the cost-model indicates that vectorization is not beneficial";
1969       VectorizeLoop = false;
1970     }
1971 
1972     if (IC == 1 && UserIC <= 1) {
1973       // Tell the user interleaving is not beneficial.
1974       DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
1975       IntDiagMsg =
1976           "the cost-model indicates that interleaving is not beneficial";
1977       InterleaveLoop = false;
1978       if (UserIC == 1)
1979         IntDiagMsg +=
1980             " and is explicitly disabled or interleave count is set to 1";
1981     } else if (IC > 1 && UserIC == 1) {
1982       // Tell the user interleaving is beneficial, but it explicitly disabled.
1983       DEBUG(dbgs()
1984             << "LV: Interleaving is beneficial but is explicitly disabled.");
1985       IntDiagMsg = "the cost-model indicates that interleaving is beneficial "
1986                    "but is explicitly disabled or interleave count is set to 1";
1987       InterleaveLoop = false;
1988     }
1989 
1990     // Override IC if user provided an interleave count.
1991     IC = UserIC > 0 ? UserIC : IC;
1992 
1993     // Emit diagnostic messages, if any.
1994     const char *VAPassName = Hints.vectorizeAnalysisPassName();
1995     if (!VectorizeLoop && !InterleaveLoop) {
1996       // Do not vectorize or interleaving the loop.
1997       emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
1998                                      L->getStartLoc(), VecDiagMsg);
1999       emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
2000                                      L->getStartLoc(), IntDiagMsg);
2001       return false;
2002     } else if (!VectorizeLoop && InterleaveLoop) {
2003       DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
2004       emitOptimizationRemarkAnalysis(F->getContext(), VAPassName, *F,
2005                                      L->getStartLoc(), VecDiagMsg);
2006     } else if (VectorizeLoop && !InterleaveLoop) {
2007       DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
2008                    << DebugLocStr << '\n');
2009       emitOptimizationRemarkAnalysis(F->getContext(), LV_NAME, *F,
2010                                      L->getStartLoc(), IntDiagMsg);
2011     } else if (VectorizeLoop && InterleaveLoop) {
2012       DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
2013                    << DebugLocStr << '\n');
2014       DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
2015     }
2016 
2017     if (!VectorizeLoop) {
2018       assert(IC > 1 && "interleave count should not be 1 or 0");
2019       // If we decided that it is not legal to vectorize the loop then
2020       // interleave it.
2021       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, IC);
2022       Unroller.vectorize(&LVL, CM.MinBWs);
2023 
2024       emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
2025                              Twine("interleaved loop (interleaved count: ") +
2026                                  Twine(IC) + ")");
2027     } else {
2028       // If we decided that it is *legal* to vectorize the loop then do it.
2029       InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, VF.Width, IC);
2030       LB.vectorize(&LVL, CM.MinBWs);
2031       ++LoopsVectorized;
2032 
2033       // Add metadata to disable runtime unrolling scalar loop when there's no
2034       // runtime check about strides and memory. Because at this situation,
2035       // scalar loop is rarely used not worthy to be unrolled.
2036       if (!LB.IsSafetyChecksAdded())
2037         AddRuntimeUnrollDisableMetaData(L);
2038 
2039       // Report the vectorization decision.
2040       emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
2041                              Twine("vectorized loop (vectorization width: ") +
2042                                  Twine(VF.Width) + ", interleaved count: " +
2043                                  Twine(IC) + ")");
2044     }
2045 
2046     // Mark the loop as already vectorized to avoid vectorizing again.
2047     Hints.setAlreadyVectorized();
2048 
2049     DEBUG(verifyFunction(*L->getHeader()->getParent()));
2050     return true;
2051   }
2052 
2053   void getAnalysisUsage(AnalysisUsage &AU) const override {
2054     AU.addRequired<AssumptionCacheTracker>();
2055     AU.addRequiredID(LoopSimplifyID);
2056     AU.addRequiredID(LCSSAID);
2057     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2058     AU.addRequired<DominatorTreeWrapperPass>();
2059     AU.addRequired<LoopInfoWrapperPass>();
2060     AU.addRequired<ScalarEvolutionWrapperPass>();
2061     AU.addRequired<TargetTransformInfoWrapperPass>();
2062     AU.addRequired<AAResultsWrapperPass>();
2063     AU.addRequired<LoopAccessAnalysis>();
2064     AU.addRequired<DemandedBitsWrapperPass>();
2065     AU.addPreserved<LoopInfoWrapperPass>();
2066     AU.addPreserved<DominatorTreeWrapperPass>();
2067     AU.addPreserved<BasicAAWrapperPass>();
2068     AU.addPreserved<GlobalsAAWrapperPass>();
2069   }
2070 };
2071 
2072 } // end anonymous namespace
2073 
2074 //===----------------------------------------------------------------------===//
2075 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2076 // LoopVectorizationCostModel.
2077 //===----------------------------------------------------------------------===//
2078 
2079 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2080   // We need to place the broadcast of invariant variables outside the loop.
2081   Instruction *Instr = dyn_cast<Instruction>(V);
2082   bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody);
2083   bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;
2084 
2085   // Place the code for broadcasting invariant variables in the new preheader.
2086   IRBuilder<>::InsertPointGuard Guard(Builder);
2087   if (Invariant)
2088     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2089 
2090   // Broadcast the scalar into all locations in the vector.
2091   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2092 
2093   return Shuf;
2094 }
2095 
2096 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx,
2097                                           const SCEV *StepSCEV) {
2098   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2099   SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2100   Value *StepValue = Exp.expandCodeFor(StepSCEV, StepSCEV->getType(),
2101                                        &*Builder.GetInsertPoint());
2102   return getStepVector(Val, StartIdx, StepValue);
2103 }
2104 
2105 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx,
2106                                           Value *Step) {
2107   assert(Val->getType()->isVectorTy() && "Must be a vector");
2108   assert(Val->getType()->getScalarType()->isIntegerTy() &&
2109          "Elem must be an integer");
2110   assert(Step->getType() == Val->getType()->getScalarType() &&
2111          "Step has wrong type");
2112   // Create the types.
2113   Type *ITy = Val->getType()->getScalarType();
2114   VectorType *Ty = cast<VectorType>(Val->getType());
2115   int VLen = Ty->getNumElements();
2116   SmallVector<Constant *, 8> Indices;
2117 
2118   // Create a vector of consecutive numbers from zero to VF.
2119   for (int i = 0; i < VLen; ++i)
2120     Indices.push_back(ConstantInt::get(ITy, StartIdx + i));
2121 
2122   // Add the consecutive indices to the vector value.
2123   Constant *Cv = ConstantVector::get(Indices);
2124   assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
2125   Step = Builder.CreateVectorSplat(VLen, Step);
2126   assert(Step->getType() == Val->getType() && "Invalid step vec");
2127   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
2128   // which can be found from the original scalar operations.
2129   Step = Builder.CreateMul(Cv, Step);
2130   return Builder.CreateAdd(Val, Step, "induction");
2131 }
2132 
2133 int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
2134   assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr");
2135   auto *SE = PSE.getSE();
2136   // Make sure that the pointer does not point to structs.
2137   if (Ptr->getType()->getPointerElementType()->isAggregateType())
2138     return 0;
2139 
2140   // If this value is a pointer induction variable we know it is consecutive.
2141   PHINode *Phi = dyn_cast_or_null<PHINode>(Ptr);
2142   if (Phi && Inductions.count(Phi)) {
2143     InductionDescriptor II = Inductions[Phi];
2144     return II.getConsecutiveDirection();
2145   }
2146 
2147   GetElementPtrInst *Gep = getGEPInstruction(Ptr);
2148   if (!Gep)
2149     return 0;
2150 
2151   unsigned NumOperands = Gep->getNumOperands();
2152   Value *GpPtr = Gep->getPointerOperand();
2153   // If this GEP value is a consecutive pointer induction variable and all of
2154   // the indices are constant then we know it is consecutive. We can
2155   Phi = dyn_cast<PHINode>(GpPtr);
2156   if (Phi && Inductions.count(Phi)) {
2157 
2158     // Make sure that the pointer does not point to structs.
2159     PointerType *GepPtrType = cast<PointerType>(GpPtr->getType());
2160     if (GepPtrType->getElementType()->isAggregateType())
2161       return 0;
2162 
2163     // Make sure that all of the index operands are loop invariant.
2164     for (unsigned i = 1; i < NumOperands; ++i)
2165       if (!SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop))
2166         return 0;
2167 
2168     InductionDescriptor II = Inductions[Phi];
2169     return II.getConsecutiveDirection();
2170   }
2171 
2172   unsigned InductionOperand = getGEPInductionOperand(Gep);
2173 
2174   // Check that all of the gep indices are uniform except for our induction
2175   // operand.
2176   for (unsigned i = 0; i != NumOperands; ++i)
2177     if (i != InductionOperand &&
2178         !SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop))
2179       return 0;
2180 
2181   // We can emit wide load/stores only if the last non-zero index is the
2182   // induction variable.
2183   const SCEV *Last = nullptr;
2184   if (!Strides.count(Gep))
2185     Last = PSE.getSCEV(Gep->getOperand(InductionOperand));
2186   else {
2187     // Because of the multiplication by a stride we can have a s/zext cast.
2188     // We are going to replace this stride by 1 so the cast is safe to ignore.
2189     //
2190     //  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
2191     //  %0 = trunc i64 %indvars.iv to i32
2192     //  %mul = mul i32 %0, %Stride1
2193     //  %idxprom = zext i32 %mul to i64  << Safe cast.
2194     //  %arrayidx = getelementptr inbounds i32* %B, i64 %idxprom
2195     //
2196     Last = replaceSymbolicStrideSCEV(PSE, Strides,
2197                                      Gep->getOperand(InductionOperand), Gep);
2198     if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(Last))
2199       Last =
2200           (C->getSCEVType() == scSignExtend || C->getSCEVType() == scZeroExtend)
2201               ? C->getOperand()
2202               : Last;
2203   }
2204   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Last)) {
2205     const SCEV *Step = AR->getStepRecurrence(*SE);
2206 
2207     // The memory is consecutive because the last index is consecutive
2208     // and all other indices are loop invariant.
2209     if (Step->isOne())
2210       return 1;
2211     if (Step->isAllOnesValue())
2212       return -1;
2213   }
2214 
2215   return 0;
2216 }
2217 
2218 bool LoopVectorizationLegality::isUniform(Value *V) {
2219   return LAI->isUniform(V);
2220 }
2221 
2222 InnerLoopVectorizer::VectorParts &
2223 InnerLoopVectorizer::getVectorValue(Value *V) {
2224   assert(V != Induction && "The new induction variable should not be used.");
2225   assert(!V->getType()->isVectorTy() && "Can't widen a vector");
2226 
2227   // If we have a stride that is replaced by one, do it here.
2228   if (Legal->hasStride(V))
2229     V = ConstantInt::get(V->getType(), 1);
2230 
2231   // If we have this scalar in the map, return it.
2232   if (WidenMap.has(V))
2233     return WidenMap.get(V);
2234 
2235   // If this scalar is unknown, assume that it is a constant or that it is
2236   // loop invariant. Broadcast V and save the value for future uses.
2237   Value *B = getBroadcastInstrs(V);
2238   return WidenMap.splat(V, B);
2239 }
2240 
2241 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2242   assert(Vec->getType()->isVectorTy() && "Invalid type");
2243   SmallVector<Constant *, 8> ShuffleMask;
2244   for (unsigned i = 0; i < VF; ++i)
2245     ShuffleMask.push_back(Builder.getInt32(VF - i - 1));
2246 
2247   return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
2248                                      ConstantVector::get(ShuffleMask),
2249                                      "reverse");
2250 }
2251 
2252 // Get a mask to interleave \p NumVec vectors into a wide vector.
2253 // I.e.  <0, VF, VF*2, ..., VF*(NumVec-1), 1, VF+1, VF*2+1, ...>
2254 // E.g. For 2 interleaved vectors, if VF is 4, the mask is:
2255 //      <0, 4, 1, 5, 2, 6, 3, 7>
2256 static Constant *getInterleavedMask(IRBuilder<> &Builder, unsigned VF,
2257                                     unsigned NumVec) {
2258   SmallVector<Constant *, 16> Mask;
2259   for (unsigned i = 0; i < VF; i++)
2260     for (unsigned j = 0; j < NumVec; j++)
2261       Mask.push_back(Builder.getInt32(j * VF + i));
2262 
2263   return ConstantVector::get(Mask);
2264 }
2265 
2266 // Get the strided mask starting from index \p Start.
2267 // I.e.  <Start, Start + Stride, ..., Start + Stride*(VF-1)>
2268 static Constant *getStridedMask(IRBuilder<> &Builder, unsigned Start,
2269                                 unsigned Stride, unsigned VF) {
2270   SmallVector<Constant *, 16> Mask;
2271   for (unsigned i = 0; i < VF; i++)
2272     Mask.push_back(Builder.getInt32(Start + i * Stride));
2273 
2274   return ConstantVector::get(Mask);
2275 }
2276 
2277 // Get a mask of two parts: The first part consists of sequential integers
2278 // starting from 0, The second part consists of UNDEFs.
2279 // I.e. <0, 1, 2, ..., NumInt - 1, undef, ..., undef>
2280 static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned NumInt,
2281                                    unsigned NumUndef) {
2282   SmallVector<Constant *, 16> Mask;
2283   for (unsigned i = 0; i < NumInt; i++)
2284     Mask.push_back(Builder.getInt32(i));
2285 
2286   Constant *Undef = UndefValue::get(Builder.getInt32Ty());
2287   for (unsigned i = 0; i < NumUndef; i++)
2288     Mask.push_back(Undef);
2289 
2290   return ConstantVector::get(Mask);
2291 }
2292 
2293 // Concatenate two vectors with the same element type. The 2nd vector should
2294 // not have more elements than the 1st vector. If the 2nd vector has less
2295 // elements, extend it with UNDEFs.
2296 static Value *ConcatenateTwoVectors(IRBuilder<> &Builder, Value *V1,
2297                                     Value *V2) {
2298   VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
2299   VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
2300   assert(VecTy1 && VecTy2 &&
2301          VecTy1->getScalarType() == VecTy2->getScalarType() &&
2302          "Expect two vectors with the same element type");
2303 
2304   unsigned NumElts1 = VecTy1->getNumElements();
2305   unsigned NumElts2 = VecTy2->getNumElements();
2306   assert(NumElts1 >= NumElts2 && "Unexpect the first vector has less elements");
2307 
2308   if (NumElts1 > NumElts2) {
2309     // Extend with UNDEFs.
2310     Constant *ExtMask =
2311         getSequentialMask(Builder, NumElts2, NumElts1 - NumElts2);
2312     V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask);
2313   }
2314 
2315   Constant *Mask = getSequentialMask(Builder, NumElts1 + NumElts2, 0);
2316   return Builder.CreateShuffleVector(V1, V2, Mask);
2317 }
2318 
2319 // Concatenate vectors in the given list. All vectors have the same type.
2320 static Value *ConcatenateVectors(IRBuilder<> &Builder,
2321                                  ArrayRef<Value *> InputList) {
2322   unsigned NumVec = InputList.size();
2323   assert(NumVec > 1 && "Should be at least two vectors");
2324 
2325   SmallVector<Value *, 8> ResList;
2326   ResList.append(InputList.begin(), InputList.end());
2327   do {
2328     SmallVector<Value *, 8> TmpList;
2329     for (unsigned i = 0; i < NumVec - 1; i += 2) {
2330       Value *V0 = ResList[i], *V1 = ResList[i + 1];
2331       assert((V0->getType() == V1->getType() || i == NumVec - 2) &&
2332              "Only the last vector may have a different type");
2333 
2334       TmpList.push_back(ConcatenateTwoVectors(Builder, V0, V1));
2335     }
2336 
2337     // Push the last vector if the total number of vectors is odd.
2338     if (NumVec % 2 != 0)
2339       TmpList.push_back(ResList[NumVec - 1]);
2340 
2341     ResList = TmpList;
2342     NumVec = ResList.size();
2343   } while (NumVec > 1);
2344 
2345   return ResList[0];
2346 }
2347 
2348 // Try to vectorize the interleave group that \p Instr belongs to.
2349 //
2350 // E.g. Translate following interleaved load group (factor = 3):
2351 //   for (i = 0; i < N; i+=3) {
2352 //     R = Pic[i];             // Member of index 0
2353 //     G = Pic[i+1];           // Member of index 1
2354 //     B = Pic[i+2];           // Member of index 2
2355 //     ... // do something to R, G, B
2356 //   }
2357 // To:
2358 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2359 //   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
2360 //   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
2361 //   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
2362 //
2363 // Or translate following interleaved store group (factor = 3):
2364 //   for (i = 0; i < N; i+=3) {
2365 //     ... do something to R, G, B
2366 //     Pic[i]   = R;           // Member of index 0
2367 //     Pic[i+1] = G;           // Member of index 1
2368 //     Pic[i+2] = B;           // Member of index 2
2369 //   }
2370 // To:
2371 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2372 //   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2373 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2374 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2375 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2376 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
2377   const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr);
2378   assert(Group && "Fail to get an interleaved access group.");
2379 
2380   // Skip if current instruction is not the insert position.
2381   if (Instr != Group->getInsertPos())
2382     return;
2383 
2384   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2385   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2386   Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();
2387 
2388   // Prepare for the vector type of the interleaved load/store.
2389   Type *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
2390   unsigned InterleaveFactor = Group->getFactor();
2391   Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
2392   Type *PtrTy = VecTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());
2393 
2394   // Prepare for the new pointers.
2395   setDebugLocFromInst(Builder, Ptr);
2396   VectorParts &PtrParts = getVectorValue(Ptr);
2397   SmallVector<Value *, 2> NewPtrs;
2398   unsigned Index = Group->getIndex(Instr);
2399   for (unsigned Part = 0; Part < UF; Part++) {
2400     // Extract the pointer for current instruction from the pointer vector. A
2401     // reverse access uses the pointer in the last lane.
2402     Value *NewPtr = Builder.CreateExtractElement(
2403         PtrParts[Part],
2404         Group->isReverse() ? Builder.getInt32(VF - 1) : Builder.getInt32(0));
2405 
2406     // Notice current instruction could be any index. Need to adjust the address
2407     // to the member of index 0.
2408     //
2409     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2410     //       b = A[i];       // Member of index 0
2411     // Current pointer is pointed to A[i+1], adjust it to A[i].
2412     //
2413     // E.g.  A[i+1] = a;     // Member of index 1
2414     //       A[i]   = b;     // Member of index 0
2415     //       A[i+2] = c;     // Member of index 2 (Current instruction)
2416     // Current pointer is pointed to A[i+2], adjust it to A[i].
2417     NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
2418 
2419     // Cast to the vector pointer type.
2420     NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
2421   }
2422 
2423   setDebugLocFromInst(Builder, Instr);
2424   Value *UndefVec = UndefValue::get(VecTy);
2425 
2426   // Vectorize the interleaved load group.
2427   if (LI) {
2428     for (unsigned Part = 0; Part < UF; Part++) {
2429       Instruction *NewLoadInstr = Builder.CreateAlignedLoad(
2430           NewPtrs[Part], Group->getAlignment(), "wide.vec");
2431 
2432       for (unsigned i = 0; i < InterleaveFactor; i++) {
2433         Instruction *Member = Group->getMember(i);
2434 
2435         // Skip the gaps in the group.
2436         if (!Member)
2437           continue;
2438 
2439         Constant *StrideMask = getStridedMask(Builder, i, InterleaveFactor, VF);
2440         Value *StridedVec = Builder.CreateShuffleVector(
2441             NewLoadInstr, UndefVec, StrideMask, "strided.vec");
2442 
2443         // If this member has different type, cast the result type.
2444         if (Member->getType() != ScalarTy) {
2445           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2446           StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy);
2447         }
2448 
2449         VectorParts &Entry = WidenMap.get(Member);
2450         Entry[Part] =
2451             Group->isReverse() ? reverseVector(StridedVec) : StridedVec;
2452       }
2453 
2454       addMetadata(NewLoadInstr, Instr);
2455     }
2456     return;
2457   }
2458 
2459   // The sub vector type for current instruction.
2460   VectorType *SubVT = VectorType::get(ScalarTy, VF);
2461 
2462   // Vectorize the interleaved store group.
2463   for (unsigned Part = 0; Part < UF; Part++) {
2464     // Collect the stored vector from each member.
2465     SmallVector<Value *, 4> StoredVecs;
2466     for (unsigned i = 0; i < InterleaveFactor; i++) {
2467       // Interleaved store group doesn't allow a gap, so each index has a member
2468       Instruction *Member = Group->getMember(i);
2469       assert(Member && "Fail to get a member from an interleaved store group");
2470 
2471       Value *StoredVec =
2472           getVectorValue(dyn_cast<StoreInst>(Member)->getValueOperand())[Part];
2473       if (Group->isReverse())
2474         StoredVec = reverseVector(StoredVec);
2475 
2476       // If this member has different type, cast it to an unified type.
2477       if (StoredVec->getType() != SubVT)
2478         StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT);
2479 
2480       StoredVecs.push_back(StoredVec);
2481     }
2482 
2483     // Concatenate all vectors into a wide vector.
2484     Value *WideVec = ConcatenateVectors(Builder, StoredVecs);
2485 
2486     // Interleave the elements in the wide vector.
2487     Constant *IMask = getInterleavedMask(Builder, VF, InterleaveFactor);
2488     Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
2489                                               "interleaved.vec");
2490 
2491     Instruction *NewStoreInstr =
2492         Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
2493     addMetadata(NewStoreInstr, Instr);
2494   }
2495 }
2496 
2497 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
2498   // Attempt to issue a wide load.
2499   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2500   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2501 
2502   assert((LI || SI) && "Invalid Load/Store instruction");
2503 
2504   // Try to vectorize the interleave group if this access is interleaved.
2505   if (Legal->isAccessInterleaved(Instr))
2506     return vectorizeInterleaveGroup(Instr);
2507 
2508   Type *ScalarDataTy = LI ? LI->getType() : SI->getValueOperand()->getType();
2509   Type *DataTy = VectorType::get(ScalarDataTy, VF);
2510   Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();
2511   unsigned Alignment = LI ? LI->getAlignment() : SI->getAlignment();
2512   // An alignment of 0 means target abi alignment. We need to use the scalar's
2513   // target abi alignment in such a case.
2514   const DataLayout &DL = Instr->getModule()->getDataLayout();
2515   if (!Alignment)
2516     Alignment = DL.getABITypeAlignment(ScalarDataTy);
2517   unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2518   unsigned ScalarAllocatedSize = DL.getTypeAllocSize(ScalarDataTy);
2519   unsigned VectorElementSize = DL.getTypeStoreSize(DataTy) / VF;
2520 
2521   if (SI && Legal->blockNeedsPredication(SI->getParent()) &&
2522       !Legal->isMaskRequired(SI))
2523     return scalarizeInstruction(Instr, true);
2524 
2525   if (ScalarAllocatedSize != VectorElementSize)
2526     return scalarizeInstruction(Instr);
2527 
2528   // If the pointer is loop invariant scalarize the load.
2529   if (LI && Legal->isUniform(Ptr))
2530     return scalarizeInstruction(Instr);
2531 
2532   // If the pointer is non-consecutive and gather/scatter is not supported
2533   // scalarize the instruction.
2534   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
2535   bool Reverse = ConsecutiveStride < 0;
2536   bool CreateGatherScatter =
2537       !ConsecutiveStride && ((LI && Legal->isLegalMaskedGather(ScalarDataTy)) ||
2538                              (SI && Legal->isLegalMaskedScatter(ScalarDataTy)));
2539 
2540   if (!ConsecutiveStride && !CreateGatherScatter)
2541     return scalarizeInstruction(Instr);
2542 
2543   Constant *Zero = Builder.getInt32(0);
2544   VectorParts &Entry = WidenMap.get(Instr);
2545   VectorParts VectorGep;
2546 
2547   // Handle consecutive loads/stores.
2548   GetElementPtrInst *Gep = getGEPInstruction(Ptr);
2549   if (ConsecutiveStride) {
2550     if (Gep && Legal->isInductionVariable(Gep->getPointerOperand())) {
2551       setDebugLocFromInst(Builder, Gep);
2552       Value *PtrOperand = Gep->getPointerOperand();
2553       Value *FirstBasePtr = getVectorValue(PtrOperand)[0];
2554       FirstBasePtr = Builder.CreateExtractElement(FirstBasePtr, Zero);
2555 
2556       // Create the new GEP with the new induction variable.
2557       GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
2558       Gep2->setOperand(0, FirstBasePtr);
2559       Gep2->setName("gep.indvar.base");
2560       Ptr = Builder.Insert(Gep2);
2561     } else if (Gep) {
2562       setDebugLocFromInst(Builder, Gep);
2563       assert(PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getPointerOperand()),
2564                                           OrigLoop) &&
2565              "Base ptr must be invariant");
2566       // The last index does not have to be the induction. It can be
2567       // consecutive and be a function of the index. For example A[I+1];
2568       unsigned NumOperands = Gep->getNumOperands();
2569       unsigned InductionOperand = getGEPInductionOperand(Gep);
2570       // Create the new GEP with the new induction variable.
2571       GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
2572 
2573       for (unsigned i = 0; i < NumOperands; ++i) {
2574         Value *GepOperand = Gep->getOperand(i);
2575         Instruction *GepOperandInst = dyn_cast<Instruction>(GepOperand);
2576 
2577         // Update last index or loop invariant instruction anchored in loop.
2578         if (i == InductionOperand ||
2579             (GepOperandInst && OrigLoop->contains(GepOperandInst))) {
2580           assert((i == InductionOperand ||
2581                   PSE.getSE()->isLoopInvariant(PSE.getSCEV(GepOperandInst),
2582                                                OrigLoop)) &&
2583                  "Must be last index or loop invariant");
2584 
2585           VectorParts &GEPParts = getVectorValue(GepOperand);
2586           Value *Index = GEPParts[0];
2587           Index = Builder.CreateExtractElement(Index, Zero);
2588           Gep2->setOperand(i, Index);
2589           Gep2->setName("gep.indvar.idx");
2590         }
2591       }
2592       Ptr = Builder.Insert(Gep2);
2593     } else { // No GEP
2594       // Use the induction element ptr.
2595       assert(isa<PHINode>(Ptr) && "Invalid induction ptr");
2596       setDebugLocFromInst(Builder, Ptr);
2597       VectorParts &PtrVal = getVectorValue(Ptr);
2598       Ptr = Builder.CreateExtractElement(PtrVal[0], Zero);
2599     }
2600   } else {
2601     // At this point we should vector version of GEP for Gather or Scatter
2602     assert(CreateGatherScatter && "The instruction should be scalarized");
2603     if (Gep) {
2604       SmallVector<VectorParts, 4> OpsV;
2605       // Vectorizing GEP, across UF parts, we want to keep each loop-invariant
2606       // base or index of GEP scalar
2607       for (Value *Op : Gep->operands()) {
2608         if (PSE.getSE()->isLoopInvariant(PSE.getSCEV(Op), OrigLoop))
2609           OpsV.push_back(VectorParts(UF, Op));
2610         else
2611           OpsV.push_back(getVectorValue(Op));
2612       }
2613 
2614       for (unsigned Part = 0; Part < UF; ++Part) {
2615         SmallVector<Value *, 4> Ops;
2616         Value *GEPBasePtr = OpsV[0][Part];
2617         for (unsigned i = 1; i < Gep->getNumOperands(); i++)
2618           Ops.push_back(OpsV[i][Part]);
2619         Value *NewGep =
2620             Builder.CreateGEP(nullptr, GEPBasePtr, Ops, "VectorGep");
2621         assert(NewGep->getType()->isVectorTy() && "Expected vector GEP");
2622         NewGep =
2623             Builder.CreateBitCast(NewGep, VectorType::get(Ptr->getType(), VF));
2624         VectorGep.push_back(NewGep);
2625       }
2626     } else
2627       VectorGep = getVectorValue(Ptr);
2628   }
2629 
2630   VectorParts Mask = createBlockInMask(Instr->getParent());
2631   // Handle Stores:
2632   if (SI) {
2633     assert(!Legal->isUniform(SI->getPointerOperand()) &&
2634            "We do not allow storing to uniform addresses");
2635     setDebugLocFromInst(Builder, SI);
2636     // We don't want to update the value in the map as it might be used in
2637     // another expression. So don't use a reference type for "StoredVal".
2638     VectorParts StoredVal = getVectorValue(SI->getValueOperand());
2639 
2640     for (unsigned Part = 0; Part < UF; ++Part) {
2641       Instruction *NewSI = nullptr;
2642       if (CreateGatherScatter) {
2643         Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr;
2644         NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part],
2645                                             Alignment, MaskPart);
2646       } else {
2647         // Calculate the pointer for the specific unroll-part.
2648         Value *PartPtr =
2649             Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));
2650 
2651         if (Reverse) {
2652           // If we store to reverse consecutive memory locations, then we need
2653           // to reverse the order of elements in the stored value.
2654           StoredVal[Part] = reverseVector(StoredVal[Part]);
2655           // If the address is consecutive but reversed, then the
2656           // wide store needs to start at the last vector element.
2657           PartPtr =
2658               Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
2659           PartPtr =
2660               Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
2661           Mask[Part] = reverseVector(Mask[Part]);
2662         }
2663 
2664         Value *VecPtr =
2665             Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2666 
2667         if (Legal->isMaskRequired(SI))
2668           NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment,
2669                                             Mask[Part]);
2670         else
2671           NewSI =
2672               Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment);
2673       }
2674       addMetadata(NewSI, SI);
2675     }
2676     return;
2677   }
2678 
2679   // Handle loads.
2680   assert(LI && "Must have a load instruction");
2681   setDebugLocFromInst(Builder, LI);
2682   for (unsigned Part = 0; Part < UF; ++Part) {
2683     Instruction *NewLI;
2684     if (CreateGatherScatter) {
2685       Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr;
2686       NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, MaskPart,
2687                                          0, "wide.masked.gather");
2688       Entry[Part] = NewLI;
2689     } else {
2690       // Calculate the pointer for the specific unroll-part.
2691       Value *PartPtr =
2692           Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));
2693 
2694       if (Reverse) {
2695         // If the address is consecutive but reversed, then the
2696         // wide load needs to start at the last vector element.
2697         PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
2698         PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
2699         Mask[Part] = reverseVector(Mask[Part]);
2700       }
2701 
2702       Value *VecPtr =
2703           Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2704       if (Legal->isMaskRequired(LI))
2705         NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
2706                                          UndefValue::get(DataTy),
2707                                          "wide.masked.load");
2708       else
2709         NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
2710       Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI;
2711     }
2712     addMetadata(NewLI, LI);
2713   }
2714 }
2715 
2716 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2717                                                bool IfPredicateStore) {
2718   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
2719   // Holds vector parameters or scalars, in case of uniform vals.
2720   SmallVector<VectorParts, 4> Params;
2721 
2722   setDebugLocFromInst(Builder, Instr);
2723 
2724   // Find all of the vectorized parameters.
2725   for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
2726     Value *SrcOp = Instr->getOperand(op);
2727 
2728     // If we are accessing the old induction variable, use the new one.
2729     if (SrcOp == OldInduction) {
2730       Params.push_back(getVectorValue(SrcOp));
2731       continue;
2732     }
2733 
2734     // Try using previously calculated values.
2735     Instruction *SrcInst = dyn_cast<Instruction>(SrcOp);
2736 
2737     // If the src is an instruction that appeared earlier in the basic block,
2738     // then it should already be vectorized.
2739     if (SrcInst && OrigLoop->contains(SrcInst)) {
2740       assert(WidenMap.has(SrcInst) && "Source operand is unavailable");
2741       // The parameter is a vector value from earlier.
2742       Params.push_back(WidenMap.get(SrcInst));
2743     } else {
2744       // The parameter is a scalar from outside the loop. Maybe even a constant.
2745       VectorParts Scalars;
2746       Scalars.append(UF, SrcOp);
2747       Params.push_back(Scalars);
2748     }
2749   }
2750 
2751   assert(Params.size() == Instr->getNumOperands() &&
2752          "Invalid number of operands");
2753 
2754   // Does this instruction return a value ?
2755   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2756 
2757   Value *UndefVec =
2758       IsVoidRetTy ? nullptr
2759                   : UndefValue::get(VectorType::get(Instr->getType(), VF));
2760   // Create a new entry in the WidenMap and initialize it to Undef or Null.
2761   VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);
2762 
2763   VectorParts Cond;
2764   if (IfPredicateStore) {
2765     assert(Instr->getParent()->getSinglePredecessor() &&
2766            "Only support single predecessor blocks");
2767     Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(),
2768                           Instr->getParent());
2769   }
2770 
2771   // For each vector unroll 'part':
2772   for (unsigned Part = 0; Part < UF; ++Part) {
2773     // For each scalar that we create:
2774     for (unsigned Width = 0; Width < VF; ++Width) {
2775 
2776       // Start if-block.
2777       Value *Cmp = nullptr;
2778       if (IfPredicateStore) {
2779         Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Width));
2780         Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp,
2781                                  ConstantInt::get(Cmp->getType(), 1));
2782       }
2783 
2784       Instruction *Cloned = Instr->clone();
2785       if (!IsVoidRetTy)
2786         Cloned->setName(Instr->getName() + ".cloned");
2787       // Replace the operands of the cloned instructions with extracted scalars.
2788       for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
2789         Value *Op = Params[op][Part];
2790         // Param is a vector. Need to extract the right lane.
2791         if (Op->getType()->isVectorTy())
2792           Op = Builder.CreateExtractElement(Op, Builder.getInt32(Width));
2793         Cloned->setOperand(op, Op);
2794       }
2795       addNewMetadata(Cloned, Instr);
2796 
2797       // Place the cloned scalar in the new loop.
2798       Builder.Insert(Cloned);
2799 
2800       // If we just cloned a new assumption, add it the assumption cache.
2801       if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2802         if (II->getIntrinsicID() == Intrinsic::assume)
2803           AC->registerAssumption(II);
2804 
2805       // If the original scalar returns a value we need to place it in a vector
2806       // so that future users will be able to use it.
2807       if (!IsVoidRetTy)
2808         VecResults[Part] = Builder.CreateInsertElement(VecResults[Part], Cloned,
2809                                                        Builder.getInt32(Width));
2810       // End if-block.
2811       if (IfPredicateStore)
2812         PredicatedStores.push_back(
2813             std::make_pair(cast<StoreInst>(Cloned), Cmp));
2814     }
2815   }
2816 }
2817 
2818 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2819                                                       Value *End, Value *Step,
2820                                                       Instruction *DL) {
2821   BasicBlock *Header = L->getHeader();
2822   BasicBlock *Latch = L->getLoopLatch();
2823   // As we're just creating this loop, it's possible no latch exists
2824   // yet. If so, use the header as this will be a single block loop.
2825   if (!Latch)
2826     Latch = Header;
2827 
2828   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2829   setDebugLocFromInst(Builder, getDebugLocFromInstOrOperands(OldInduction));
2830   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2831 
2832   Builder.SetInsertPoint(Latch->getTerminator());
2833 
2834   // Create i+1 and fill the PHINode.
2835   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2836   Induction->addIncoming(Start, L->getLoopPreheader());
2837   Induction->addIncoming(Next, Latch);
2838   // Create the compare.
2839   Value *ICmp = Builder.CreateICmpEQ(Next, End);
2840   Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
2841 
2842   // Now we have two terminators. Remove the old one from the block.
2843   Latch->getTerminator()->eraseFromParent();
2844 
2845   return Induction;
2846 }
2847 
2848 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2849   if (TripCount)
2850     return TripCount;
2851 
2852   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2853   // Find the loop boundaries.
2854   ScalarEvolution *SE = PSE.getSE();
2855   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2856   assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
2857          "Invalid loop count");
2858 
2859   Type *IdxTy = Legal->getWidestInductionType();
2860 
2861   // The exit count might have the type of i64 while the phi is i32. This can
2862   // happen if we have an induction variable that is sign extended before the
2863   // compare. The only way that we get a backedge taken count is that the
2864   // induction variable was signed and as such will not overflow. In such a case
2865   // truncation is legal.
2866   if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
2867       IdxTy->getPrimitiveSizeInBits())
2868     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2869   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2870 
2871   // Get the total trip count from the count by adding 1.
2872   const SCEV *ExitCount = SE->getAddExpr(
2873       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2874 
2875   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2876 
2877   // Expand the trip count and place the new instructions in the preheader.
2878   // Notice that the pre-header does not change, only the loop body.
2879   SCEVExpander Exp(*SE, DL, "induction");
2880 
2881   // Count holds the overall loop count (N).
2882   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2883                                 L->getLoopPreheader()->getTerminator());
2884 
2885   if (TripCount->getType()->isPointerTy())
2886     TripCount =
2887         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
2888                                     L->getLoopPreheader()->getTerminator());
2889 
2890   return TripCount;
2891 }
2892 
2893 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
2894   if (VectorTripCount)
2895     return VectorTripCount;
2896 
2897   Value *TC = getOrCreateTripCount(L);
2898   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2899 
2900   // Now we need to generate the expression for the part of the loop that the
2901   // vectorized body will execute. This is equal to N - (N % Step) if scalar
2902   // iterations are not required for correctness, or N - Step, otherwise. Step
2903   // is equal to the vectorization factor (number of SIMD elements) times the
2904   // unroll factor (number of SIMD instructions).
2905   Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
2906   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
2907 
2908   // If there is a non-reversed interleaved group that may speculatively access
2909   // memory out-of-bounds, we need to ensure that there will be at least one
2910   // iteration of the scalar epilogue loop. Thus, if the step evenly divides
2911   // the trip count, we set the remainder to be equal to the step. If the step
2912   // does not evenly divide the trip count, no adjustment is necessary since
2913   // there will already be scalar iterations. Note that the minimum iterations
2914   // check ensures that N >= Step.
2915   if (VF > 1 && Legal->requiresScalarEpilogue()) {
2916     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
2917     R = Builder.CreateSelect(IsZero, Step, R);
2918   }
2919 
2920   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
2921 
2922   return VectorTripCount;
2923 }
2924 
2925 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
2926                                                          BasicBlock *Bypass) {
2927   Value *Count = getOrCreateTripCount(L);
2928   BasicBlock *BB = L->getLoopPreheader();
2929   IRBuilder<> Builder(BB->getTerminator());
2930 
2931   // Generate code to check that the loop's trip count that we computed by
2932   // adding one to the backedge-taken count will not overflow.
2933   Value *CheckMinIters = Builder.CreateICmpULT(
2934       Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");
2935 
2936   BasicBlock *NewBB =
2937       BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked");
2938   // Update dominator tree immediately if the generated block is a
2939   // LoopBypassBlock because SCEV expansions to generate loop bypass
2940   // checks may query it before the current function is finished.
2941   DT->addNewBlock(NewBB, BB);
2942   if (L->getParentLoop())
2943     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2944   ReplaceInstWithInst(BB->getTerminator(),
2945                       BranchInst::Create(Bypass, NewBB, CheckMinIters));
2946   LoopBypassBlocks.push_back(BB);
2947 }
2948 
2949 void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L,
2950                                                      BasicBlock *Bypass) {
2951   Value *TC = getOrCreateVectorTripCount(L);
2952   BasicBlock *BB = L->getLoopPreheader();
2953   IRBuilder<> Builder(BB->getTerminator());
2954 
2955   // Now, compare the new count to zero. If it is zero skip the vector loop and
2956   // jump to the scalar loop.
2957   Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
2958                                     "cmp.zero");
2959 
2960   // Generate code to check that the loop's trip count that we computed by
2961   // adding one to the backedge-taken count will not overflow.
2962   BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2963   // Update dominator tree immediately if the generated block is a
2964   // LoopBypassBlock because SCEV expansions to generate loop bypass
2965   // checks may query it before the current function is finished.
2966   DT->addNewBlock(NewBB, BB);
2967   if (L->getParentLoop())
2968     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2969   ReplaceInstWithInst(BB->getTerminator(),
2970                       BranchInst::Create(Bypass, NewBB, Cmp));
2971   LoopBypassBlocks.push_back(BB);
2972 }
2973 
2974 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
2975   BasicBlock *BB = L->getLoopPreheader();
2976 
2977   // Generate the code to check that the SCEV assumptions that we made.
2978   // We want the new basic block to start at the first instruction in a
2979   // sequence of instructions that form a check.
2980   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
2981                    "scev.check");
2982   Value *SCEVCheck =
2983       Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
2984 
2985   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
2986     if (C->isZero())
2987       return;
2988 
2989   // Create a new block containing the stride check.
2990   BB->setName("vector.scevcheck");
2991   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2992   // Update dominator tree immediately if the generated block is a
2993   // LoopBypassBlock because SCEV expansions to generate loop bypass
2994   // checks may query it before the current function is finished.
2995   DT->addNewBlock(NewBB, BB);
2996   if (L->getParentLoop())
2997     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2998   ReplaceInstWithInst(BB->getTerminator(),
2999                       BranchInst::Create(Bypass, NewBB, SCEVCheck));
3000   LoopBypassBlocks.push_back(BB);
3001   AddedSafetyChecks = true;
3002 }
3003 
3004 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
3005   BasicBlock *BB = L->getLoopPreheader();
3006 
3007   // Generate the code that checks in runtime if arrays overlap. We put the
3008   // checks into a separate block to make the more common case of few elements
3009   // faster.
3010   Instruction *FirstCheckInst;
3011   Instruction *MemRuntimeCheck;
3012   std::tie(FirstCheckInst, MemRuntimeCheck) =
3013       Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
3014   if (!MemRuntimeCheck)
3015     return;
3016 
3017   // Create a new block containing the memory check.
3018   BB->setName("vector.memcheck");
3019   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3020   // Update dominator tree immediately if the generated block is a
3021   // LoopBypassBlock because SCEV expansions to generate loop bypass
3022   // checks may query it before the current function is finished.
3023   DT->addNewBlock(NewBB, BB);
3024   if (L->getParentLoop())
3025     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3026   ReplaceInstWithInst(BB->getTerminator(),
3027                       BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
3028   LoopBypassBlocks.push_back(BB);
3029   AddedSafetyChecks = true;
3030 
3031   // We currently don't use LoopVersioning for the actual loop cloning but we
3032   // still use it to add the noalias metadata.
3033   LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
3034                                            PSE.getSE());
3035   LVer->prepareNoAliasMetadata();
3036 }
3037 
3038 void InnerLoopVectorizer::createEmptyLoop() {
3039   /*
3040    In this function we generate a new loop. The new loop will contain
3041    the vectorized instructions while the old loop will continue to run the
3042    scalar remainder.
3043 
3044        [ ] <-- loop iteration number check.
3045     /   |
3046    /    v
3047   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3048   |  /  |
3049   | /   v
3050   ||   [ ]     <-- vector pre header.
3051   |/    |
3052   |     v
3053   |    [  ] \
3054   |    [  ]_|   <-- vector loop.
3055   |     |
3056   |     v
3057   |   -[ ]   <--- middle-block.
3058   |  /  |
3059   | /   v
3060   -|- >[ ]     <--- new preheader.
3061    |    |
3062    |    v
3063    |   [ ] \
3064    |   [ ]_|   <-- old scalar loop to handle remainder.
3065     \   |
3066      \  v
3067       >[ ]     <-- exit block.
3068    ...
3069    */
3070 
3071   BasicBlock *OldBasicBlock = OrigLoop->getHeader();
3072   BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
3073   BasicBlock *ExitBlock = OrigLoop->getExitBlock();
3074   assert(VectorPH && "Invalid loop structure");
3075   assert(ExitBlock && "Must have an exit block");
3076 
3077   // Some loops have a single integer induction variable, while other loops
3078   // don't. One example is c++ iterators that often have multiple pointer
3079   // induction variables. In the code below we also support a case where we
3080   // don't have a single induction variable.
3081   //
3082   // We try to obtain an induction variable from the original loop as hard
3083   // as possible. However if we don't find one that:
3084   //   - is an integer
3085   //   - counts from zero, stepping by one
3086   //   - is the size of the widest induction variable type
3087   // then we create a new one.
3088   OldInduction = Legal->getInduction();
3089   Type *IdxTy = Legal->getWidestInductionType();
3090 
3091   // Split the single block loop into the two loop structure described above.
3092   BasicBlock *VecBody =
3093       VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
3094   BasicBlock *MiddleBlock =
3095       VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
3096   BasicBlock *ScalarPH =
3097       MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
3098 
3099   // Create and register the new vector loop.
3100   Loop *Lp = new Loop();
3101   Loop *ParentLoop = OrigLoop->getParentLoop();
3102 
3103   // Insert the new loop into the loop nest and register the new basic blocks
3104   // before calling any utilities such as SCEV that require valid LoopInfo.
3105   if (ParentLoop) {
3106     ParentLoop->addChildLoop(Lp);
3107     ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
3108     ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
3109   } else {
3110     LI->addTopLevelLoop(Lp);
3111   }
3112   Lp->addBasicBlockToLoop(VecBody, *LI);
3113 
3114   // Find the loop boundaries.
3115   Value *Count = getOrCreateTripCount(Lp);
3116 
3117   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3118 
3119   // We need to test whether the backedge-taken count is uint##_max. Adding one
3120   // to it will cause overflow and an incorrect loop trip count in the vector
3121   // body. In case of overflow we want to directly jump to the scalar remainder
3122   // loop.
3123   emitMinimumIterationCountCheck(Lp, ScalarPH);
3124   // Now, compare the new count to zero. If it is zero skip the vector loop and
3125   // jump to the scalar loop.
3126   emitVectorLoopEnteredCheck(Lp, ScalarPH);
3127   // Generate the code to check any assumptions that we've made for SCEV
3128   // expressions.
3129   emitSCEVChecks(Lp, ScalarPH);
3130 
3131   // Generate the code that checks in runtime if arrays overlap. We put the
3132   // checks into a separate block to make the more common case of few elements
3133   // faster.
3134   emitMemRuntimeChecks(Lp, ScalarPH);
3135 
3136   // Generate the induction variable.
3137   // The loop step is equal to the vectorization factor (num of SIMD elements)
3138   // times the unroll factor (num of SIMD instructions).
3139   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3140   Constant *Step = ConstantInt::get(IdxTy, VF * UF);
3141   Induction =
3142       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3143                               getDebugLocFromInstOrOperands(OldInduction));
3144 
3145   // We are going to resume the execution of the scalar loop.
3146   // Go over all of the induction variables that we found and fix the
3147   // PHIs that are left in the scalar version of the loop.
3148   // The starting values of PHI nodes depend on the counter of the last
3149   // iteration in the vectorized loop.
3150   // If we come from a bypass edge then we need to start from the original
3151   // start value.
3152 
3153   // This variable saves the new starting index for the scalar loop. It is used
3154   // to test if there are any tail iterations left once the vector loop has
3155   // completed.
3156   LoopVectorizationLegality::InductionList::iterator I, E;
3157   LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
3158   for (I = List->begin(), E = List->end(); I != E; ++I) {
3159     PHINode *OrigPhi = I->first;
3160     InductionDescriptor II = I->second;
3161 
3162     // Create phi nodes to merge from the  backedge-taken check block.
3163     PHINode *BCResumeVal = PHINode::Create(
3164         OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
3165     Value *EndValue;
3166     if (OrigPhi == OldInduction) {
3167       // We know what the end value is.
3168       EndValue = CountRoundDown;
3169     } else {
3170       IRBuilder<> B(LoopBypassBlocks.back()->getTerminator());
3171       Value *CRD = B.CreateSExtOrTrunc(CountRoundDown,
3172                                        II.getStep()->getType(), "cast.crd");
3173       const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
3174       EndValue = II.transform(B, CRD, PSE.getSE(), DL);
3175       EndValue->setName("ind.end");
3176     }
3177 
3178     // The new PHI merges the original incoming value, in case of a bypass,
3179     // or the value at the end of the vectorized loop.
3180     BCResumeVal->addIncoming(EndValue, MiddleBlock);
3181 
3182     // Fix the scalar body counter (PHI node).
3183     unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
3184 
3185     // The old induction's phi node in the scalar body needs the truncated
3186     // value.
3187     for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
3188       BCResumeVal->addIncoming(II.getStartValue(), LoopBypassBlocks[I]);
3189     OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
3190   }
3191 
3192   // Add a check in the middle block to see if we have completed
3193   // all of the iterations in the first vector loop.
3194   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3195   Value *CmpN =
3196       CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
3197                       CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
3198   ReplaceInstWithInst(MiddleBlock->getTerminator(),
3199                       BranchInst::Create(ExitBlock, ScalarPH, CmpN));
3200 
3201   // Get ready to start creating new instructions into the vectorized body.
3202   Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());
3203 
3204   // Save the state.
3205   LoopVectorPreHeader = Lp->getLoopPreheader();
3206   LoopScalarPreHeader = ScalarPH;
3207   LoopMiddleBlock = MiddleBlock;
3208   LoopExitBlock = ExitBlock;
3209   LoopVectorBody = VecBody;
3210   LoopScalarBody = OldBasicBlock;
3211 
3212   // Keep all loop hints from the original loop on the vector loop (we'll
3213   // replace the vectorizer-specific hints below).
3214   if (MDNode *LID = OrigLoop->getLoopID())
3215     Lp->setLoopID(LID);
3216 
3217   LoopVectorizeHints Hints(Lp, true);
3218   Hints.setAlreadyVectorized();
3219 }
3220 
3221 namespace {
3222 struct CSEDenseMapInfo {
3223   static bool canHandle(Instruction *I) {
3224     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3225            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3226   }
3227   static inline Instruction *getEmptyKey() {
3228     return DenseMapInfo<Instruction *>::getEmptyKey();
3229   }
3230   static inline Instruction *getTombstoneKey() {
3231     return DenseMapInfo<Instruction *>::getTombstoneKey();
3232   }
3233   static unsigned getHashValue(Instruction *I) {
3234     assert(canHandle(I) && "Unknown instruction!");
3235     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3236                                                            I->value_op_end()));
3237   }
3238   static bool isEqual(Instruction *LHS, Instruction *RHS) {
3239     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3240         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3241       return LHS == RHS;
3242     return LHS->isIdenticalTo(RHS);
3243   }
3244 };
3245 }
3246 
3247 ///\brief Perform cse of induction variable instructions.
3248 static void cse(BasicBlock *BB) {
3249   // Perform simple cse.
3250   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3251   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3252     Instruction *In = &*I++;
3253 
3254     if (!CSEDenseMapInfo::canHandle(In))
3255       continue;
3256 
3257     // Check if we can replace this instruction with any of the
3258     // visited instructions.
3259     if (Instruction *V = CSEMap.lookup(In)) {
3260       In->replaceAllUsesWith(V);
3261       In->eraseFromParent();
3262       continue;
3263     }
3264 
3265     CSEMap[In] = In;
3266   }
3267 }
3268 
3269 /// \brief Adds a 'fast' flag to floating point operations.
3270 static Value *addFastMathFlag(Value *V) {
3271   if (isa<FPMathOperator>(V)) {
3272     FastMathFlags Flags;
3273     Flags.setUnsafeAlgebra();
3274     cast<Instruction>(V)->setFastMathFlags(Flags);
3275   }
3276   return V;
3277 }
3278 
3279 /// Estimate the overhead of scalarizing a value. Insert and Extract are set if
3280 /// the result needs to be inserted and/or extracted from vectors.
3281 static unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract,
3282                                          const TargetTransformInfo &TTI) {
3283   if (Ty->isVoidTy())
3284     return 0;
3285 
3286   assert(Ty->isVectorTy() && "Can only scalarize vectors");
3287   unsigned Cost = 0;
3288 
3289   for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
3290     if (Insert)
3291       Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, i);
3292     if (Extract)
3293       Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, i);
3294   }
3295 
3296   return Cost;
3297 }
3298 
3299 // Estimate cost of a call instruction CI if it were vectorized with factor VF.
3300 // Return the cost of the instruction, including scalarization overhead if it's
3301 // needed. The flag NeedToScalarize shows if the call needs to be scalarized -
3302 // i.e. either vector version isn't available, or is too expensive.
3303 static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
3304                                   const TargetTransformInfo &TTI,
3305                                   const TargetLibraryInfo *TLI,
3306                                   bool &NeedToScalarize) {
3307   Function *F = CI->getCalledFunction();
3308   StringRef FnName = CI->getCalledFunction()->getName();
3309   Type *ScalarRetTy = CI->getType();
3310   SmallVector<Type *, 4> Tys, ScalarTys;
3311   for (auto &ArgOp : CI->arg_operands())
3312     ScalarTys.push_back(ArgOp->getType());
3313 
3314   // Estimate cost of scalarized vector call. The source operands are assumed
3315   // to be vectors, so we need to extract individual elements from there,
3316   // execute VF scalar calls, and then gather the result into the vector return
3317   // value.
3318   unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
3319   if (VF == 1)
3320     return ScalarCallCost;
3321 
3322   // Compute corresponding vector type for return value and arguments.
3323   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3324   for (unsigned i = 0, ie = ScalarTys.size(); i != ie; ++i)
3325     Tys.push_back(ToVectorTy(ScalarTys[i], VF));
3326 
3327   // Compute costs of unpacking argument values for the scalar calls and
3328   // packing the return values to a vector.
3329   unsigned ScalarizationCost =
3330       getScalarizationOverhead(RetTy, true, false, TTI);
3331   for (unsigned i = 0, ie = Tys.size(); i != ie; ++i)
3332     ScalarizationCost += getScalarizationOverhead(Tys[i], false, true, TTI);
3333 
3334   unsigned Cost = ScalarCallCost * VF + ScalarizationCost;
3335 
3336   // If we can't emit a vector call for this function, then the currently found
3337   // cost is the cost we need to return.
3338   NeedToScalarize = true;
3339   if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin())
3340     return Cost;
3341 
3342   // If the corresponding vector cost is cheaper, return its cost.
3343   unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
3344   if (VectorCallCost < Cost) {
3345     NeedToScalarize = false;
3346     return VectorCallCost;
3347   }
3348   return Cost;
3349 }
3350 
3351 // Estimate cost of an intrinsic call instruction CI if it were vectorized with
3352 // factor VF.  Return the cost of the instruction, including scalarization
3353 // overhead if it's needed.
3354 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF,
3355                                        const TargetTransformInfo &TTI,
3356                                        const TargetLibraryInfo *TLI) {
3357   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3358   assert(ID && "Expected intrinsic call!");
3359 
3360   Type *RetTy = ToVectorTy(CI->getType(), VF);
3361   SmallVector<Type *, 4> Tys;
3362   for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i)
3363     Tys.push_back(ToVectorTy(CI->getArgOperand(i)->getType(), VF));
3364 
3365   FastMathFlags FMF;
3366   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3367     FMF = FPMO->getFastMathFlags();
3368 
3369   return TTI.getIntrinsicInstrCost(ID, RetTy, Tys, FMF);
3370 }
3371 
3372 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3373   IntegerType *I1 = cast<IntegerType>(T1->getVectorElementType());
3374   IntegerType *I2 = cast<IntegerType>(T2->getVectorElementType());
3375   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3376 }
3377 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3378   IntegerType *I1 = cast<IntegerType>(T1->getVectorElementType());
3379   IntegerType *I2 = cast<IntegerType>(T2->getVectorElementType());
3380   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3381 }
3382 
3383 void InnerLoopVectorizer::truncateToMinimalBitwidths() {
3384   // For every instruction `I` in MinBWs, truncate the operands, create a
3385   // truncated version of `I` and reextend its result. InstCombine runs
3386   // later and will remove any ext/trunc pairs.
3387   //
3388   for (auto &KV : MinBWs) {
3389     VectorParts &Parts = WidenMap.get(KV.first);
3390     for (Value *&I : Parts) {
3391       if (I->use_empty())
3392         continue;
3393       Type *OriginalTy = I->getType();
3394       Type *ScalarTruncatedTy =
3395           IntegerType::get(OriginalTy->getContext(), KV.second);
3396       Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
3397                                           OriginalTy->getVectorNumElements());
3398       if (TruncatedTy == OriginalTy)
3399         continue;
3400 
3401       if (!isa<Instruction>(I))
3402         continue;
3403 
3404       IRBuilder<> B(cast<Instruction>(I));
3405       auto ShrinkOperand = [&](Value *V) -> Value * {
3406         if (auto *ZI = dyn_cast<ZExtInst>(V))
3407           if (ZI->getSrcTy() == TruncatedTy)
3408             return ZI->getOperand(0);
3409         return B.CreateZExtOrTrunc(V, TruncatedTy);
3410       };
3411 
3412       // The actual instruction modification depends on the instruction type,
3413       // unfortunately.
3414       Value *NewI = nullptr;
3415       if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
3416         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3417                              ShrinkOperand(BO->getOperand(1)));
3418         cast<BinaryOperator>(NewI)->copyIRFlags(I);
3419       } else if (ICmpInst *CI = dyn_cast<ICmpInst>(I)) {
3420         NewI =
3421             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3422                          ShrinkOperand(CI->getOperand(1)));
3423       } else if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
3424         NewI = B.CreateSelect(SI->getCondition(),
3425                               ShrinkOperand(SI->getTrueValue()),
3426                               ShrinkOperand(SI->getFalseValue()));
3427       } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
3428         switch (CI->getOpcode()) {
3429         default:
3430           llvm_unreachable("Unhandled cast!");
3431         case Instruction::Trunc:
3432           NewI = ShrinkOperand(CI->getOperand(0));
3433           break;
3434         case Instruction::SExt:
3435           NewI = B.CreateSExtOrTrunc(
3436               CI->getOperand(0),
3437               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3438           break;
3439         case Instruction::ZExt:
3440           NewI = B.CreateZExtOrTrunc(
3441               CI->getOperand(0),
3442               smallestIntegerVectorType(OriginalTy, TruncatedTy));
3443           break;
3444         }
3445       } else if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(I)) {
3446         auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
3447         auto *O0 = B.CreateZExtOrTrunc(
3448             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
3449         auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
3450         auto *O1 = B.CreateZExtOrTrunc(
3451             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
3452 
3453         NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
3454       } else if (isa<LoadInst>(I)) {
3455         // Don't do anything with the operands, just extend the result.
3456         continue;
3457       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3458         auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
3459         auto *O0 = B.CreateZExtOrTrunc(
3460             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3461         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3462         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3463       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3464         auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
3465         auto *O0 = B.CreateZExtOrTrunc(
3466             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3467         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3468       } else {
3469         llvm_unreachable("Unhandled instruction type!");
3470       }
3471 
3472       // Lastly, extend the result.
3473       NewI->takeName(cast<Instruction>(I));
3474       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3475       I->replaceAllUsesWith(Res);
3476       cast<Instruction>(I)->eraseFromParent();
3477       I = Res;
3478     }
3479   }
3480 
3481   // We'll have created a bunch of ZExts that are now parentless. Clean up.
3482   for (auto &KV : MinBWs) {
3483     VectorParts &Parts = WidenMap.get(KV.first);
3484     for (Value *&I : Parts) {
3485       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3486       if (Inst && Inst->use_empty()) {
3487         Value *NewI = Inst->getOperand(0);
3488         Inst->eraseFromParent();
3489         I = NewI;
3490       }
3491     }
3492   }
3493 }
3494 
3495 void InnerLoopVectorizer::vectorizeLoop() {
3496   //===------------------------------------------------===//
3497   //
3498   // Notice: any optimization or new instruction that go
3499   // into the code below should be also be implemented in
3500   // the cost-model.
3501   //
3502   //===------------------------------------------------===//
3503   Constant *Zero = Builder.getInt32(0);
3504 
3505   // In order to support recurrences we need to be able to vectorize Phi nodes.
3506   // Phi nodes have cycles, so we need to vectorize them in two stages. First,
3507   // we create a new vector PHI node with no incoming edges. We use this value
3508   // when we vectorize all of the instructions that use the PHI. Next, after
3509   // all of the instructions in the block are complete we add the new incoming
3510   // edges to the PHI. At this point all of the instructions in the basic block
3511   // are vectorized, so we can use them to construct the PHI.
3512   PhiVector PHIsToFix;
3513 
3514   // Scan the loop in a topological order to ensure that defs are vectorized
3515   // before users.
3516   LoopBlocksDFS DFS(OrigLoop);
3517   DFS.perform(LI);
3518 
3519   // Vectorize all of the blocks in the original loop.
3520   for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(), be = DFS.endRPO();
3521        bb != be; ++bb)
3522     vectorizeBlockInLoop(*bb, &PHIsToFix);
3523 
3524   // Insert truncates and extends for any truncated instructions as hints to
3525   // InstCombine.
3526   if (VF > 1)
3527     truncateToMinimalBitwidths();
3528 
3529   // At this point every instruction in the original loop is widened to a
3530   // vector form. Now we need to fix the recurrences in PHIsToFix. These PHI
3531   // nodes are currently empty because we did not want to introduce cycles.
3532   // This is the second stage of vectorizing recurrences.
3533   for (PHINode *Phi : PHIsToFix) {
3534     assert(Phi && "Unable to recover vectorized PHI");
3535 
3536     // Handle first-order recurrences that need to be fixed.
3537     if (Legal->isFirstOrderRecurrence(Phi)) {
3538       fixFirstOrderRecurrence(Phi);
3539       continue;
3540     }
3541 
3542     // If the phi node is not a first-order recurrence, it must be a reduction.
3543     // Get it's reduction variable descriptor.
3544     assert(Legal->isReductionVariable(Phi) &&
3545            "Unable to find the reduction variable");
3546     RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
3547 
3548     RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3549     TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3550     Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3551     RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3552         RdxDesc.getMinMaxRecurrenceKind();
3553     setDebugLocFromInst(Builder, ReductionStartValue);
3554 
3555     // We need to generate a reduction vector from the incoming scalar.
3556     // To do so, we need to generate the 'identity' vector and override
3557     // one of the elements with the incoming scalar reduction. We need
3558     // to do it in the vector-loop preheader.
3559     Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());
3560 
3561     // This is the vector-clone of the value that leaves the loop.
3562     VectorParts &VectorExit = getVectorValue(LoopExitInst);
3563     Type *VecTy = VectorExit[0]->getType();
3564 
3565     // Find the reduction identity variable. Zero for addition, or, xor,
3566     // one for multiplication, -1 for And.
3567     Value *Identity;
3568     Value *VectorStart;
3569     if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3570         RK == RecurrenceDescriptor::RK_FloatMinMax) {
3571       // MinMax reduction have the start value as their identify.
3572       if (VF == 1) {
3573         VectorStart = Identity = ReductionStartValue;
3574       } else {
3575         VectorStart = Identity =
3576             Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3577       }
3578     } else {
3579       // Handle other reduction kinds:
3580       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3581           RK, VecTy->getScalarType());
3582       if (VF == 1) {
3583         Identity = Iden;
3584         // This vector is the Identity vector where the first element is the
3585         // incoming scalar reduction.
3586         VectorStart = ReductionStartValue;
3587       } else {
3588         Identity = ConstantVector::getSplat(VF, Iden);
3589 
3590         // This vector is the Identity vector where the first element is the
3591         // incoming scalar reduction.
3592         VectorStart =
3593             Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3594       }
3595     }
3596 
3597     // Fix the vector-loop phi.
3598 
3599     // Reductions do not have to start at zero. They can start with
3600     // any loop invariant values.
3601     VectorParts &VecRdxPhi = WidenMap.get(Phi);
3602     BasicBlock *Latch = OrigLoop->getLoopLatch();
3603     Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3604     VectorParts &Val = getVectorValue(LoopVal);
3605     for (unsigned part = 0; part < UF; ++part) {
3606       // Make sure to add the reduction stat value only to the
3607       // first unroll part.
3608       Value *StartVal = (part == 0) ? VectorStart : Identity;
3609       cast<PHINode>(VecRdxPhi[part])
3610           ->addIncoming(StartVal, LoopVectorPreHeader);
3611       cast<PHINode>(VecRdxPhi[part])
3612           ->addIncoming(Val[part], LoopVectorBody);
3613     }
3614 
3615     // Before each round, move the insertion point right between
3616     // the PHIs and the values we are going to write.
3617     // This allows us to write both PHINodes and the extractelement
3618     // instructions.
3619     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3620 
3621     VectorParts RdxParts = getVectorValue(LoopExitInst);
3622     setDebugLocFromInst(Builder, LoopExitInst);
3623 
3624     // If the vector reduction can be performed in a smaller type, we truncate
3625     // then extend the loop exit value to enable InstCombine to evaluate the
3626     // entire expression in the smaller type.
3627     if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
3628       Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3629       Builder.SetInsertPoint(LoopVectorBody->getTerminator());
3630       for (unsigned part = 0; part < UF; ++part) {
3631         Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
3632         Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
3633                                           : Builder.CreateZExt(Trunc, VecTy);
3634         for (Value::user_iterator UI = RdxParts[part]->user_begin();
3635              UI != RdxParts[part]->user_end();)
3636           if (*UI != Trunc) {
3637             (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd);
3638             RdxParts[part] = Extnd;
3639           } else {
3640             ++UI;
3641           }
3642       }
3643       Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3644       for (unsigned part = 0; part < UF; ++part)
3645         RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
3646     }
3647 
3648     // Reduce all of the unrolled parts into a single vector.
3649     Value *ReducedPartRdx = RdxParts[0];
3650     unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
3651     setDebugLocFromInst(Builder, ReducedPartRdx);
3652     for (unsigned part = 1; part < UF; ++part) {
3653       if (Op != Instruction::ICmp && Op != Instruction::FCmp)
3654         // Floating point operations had to be 'fast' to enable the reduction.
3655         ReducedPartRdx = addFastMathFlag(
3656             Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part],
3657                                 ReducedPartRdx, "bin.rdx"));
3658       else
3659         ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp(
3660             Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]);
3661     }
3662 
3663     if (VF > 1) {
3664       // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles
3665       // and vector ops, reducing the set of values being computed by half each
3666       // round.
3667       assert(isPowerOf2_32(VF) &&
3668              "Reduction emission only supported for pow2 vectors!");
3669       Value *TmpVec = ReducedPartRdx;
3670       SmallVector<Constant *, 32> ShuffleMask(VF, nullptr);
3671       for (unsigned i = VF; i != 1; i >>= 1) {
3672         // Move the upper half of the vector to the lower half.
3673         for (unsigned j = 0; j != i / 2; ++j)
3674           ShuffleMask[j] = Builder.getInt32(i / 2 + j);
3675 
3676         // Fill the rest of the mask with undef.
3677         std::fill(&ShuffleMask[i / 2], ShuffleMask.end(),
3678                   UndefValue::get(Builder.getInt32Ty()));
3679 
3680         Value *Shuf = Builder.CreateShuffleVector(
3681             TmpVec, UndefValue::get(TmpVec->getType()),
3682             ConstantVector::get(ShuffleMask), "rdx.shuf");
3683 
3684         if (Op != Instruction::ICmp && Op != Instruction::FCmp)
3685           // Floating point operations had to be 'fast' to enable the reduction.
3686           TmpVec = addFastMathFlag(Builder.CreateBinOp(
3687               (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx"));
3688         else
3689           TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind,
3690                                                         TmpVec, Shuf);
3691       }
3692 
3693       // The result is in the first element of the vector.
3694       ReducedPartRdx =
3695           Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
3696 
3697       // If the reduction can be performed in a smaller type, we need to extend
3698       // the reduction to the wider type before we branch to the original loop.
3699       if (Phi->getType() != RdxDesc.getRecurrenceType())
3700         ReducedPartRdx =
3701             RdxDesc.isSigned()
3702                 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
3703                 : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
3704     }
3705 
3706     // Create a phi node that merges control-flow from the backedge-taken check
3707     // block and the middle block.
3708     PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
3709                                           LoopScalarPreHeader->getTerminator());
3710     for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
3711       BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
3712     BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
3713 
3714     // Now, we need to fix the users of the reduction variable
3715     // inside and outside of the scalar remainder loop.
3716     // We know that the loop is in LCSSA form. We need to update the
3717     // PHI nodes in the exit blocks.
3718     for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
3719                               LEE = LoopExitBlock->end();
3720          LEI != LEE; ++LEI) {
3721       PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
3722       if (!LCSSAPhi)
3723         break;
3724 
3725       // All PHINodes need to have a single entry edge, or two if
3726       // we already fixed them.
3727       assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
3728 
3729       // We found our reduction value exit-PHI. Update it with the
3730       // incoming bypass edge.
3731       if (LCSSAPhi->getIncomingValue(0) == LoopExitInst) {
3732         // Add an edge coming from the bypass.
3733         LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
3734         break;
3735       }
3736     } // end of the LCSSA phi scan.
3737 
3738     // Fix the scalar loop reduction variable with the incoming reduction sum
3739     // from the vector body and from the backedge value.
3740     int IncomingEdgeBlockIdx =
3741         Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
3742     assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
3743     // Pick the other block.
3744     int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
3745     Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
3746     Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
3747   } // end of for each Phi in PHIsToFix.
3748 
3749   fixLCSSAPHIs();
3750 
3751   // Make sure DomTree is updated.
3752   updateAnalysis();
3753 
3754   // Predicate any stores.
3755   for (auto KV : PredicatedStores) {
3756     BasicBlock::iterator I(KV.first);
3757     auto *BB = SplitBlock(I->getParent(), &*std::next(I), DT, LI);
3758     auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false,
3759                                         /*BranchWeights=*/nullptr, DT, LI);
3760     I->moveBefore(T);
3761     I->getParent()->setName("pred.store.if");
3762     BB->setName("pred.store.continue");
3763   }
3764   DEBUG(DT->verifyDomTree());
3765   // Remove redundant induction instructions.
3766   cse(LoopVectorBody);
3767 }
3768 
3769 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3770 
3771   // This is the second phase of vectorizing first-order recurrences. An
3772   // overview of the transformation is described below. Suppose we have the
3773   // following loop.
3774   //
3775   //   for (int i = 0; i < n; ++i)
3776   //     b[i] = a[i] - a[i - 1];
3777   //
3778   // There is a first-order recurrence on "a". For this loop, the shorthand
3779   // scalar IR looks like:
3780   //
3781   //   scalar.ph:
3782   //     s_init = a[-1]
3783   //     br scalar.body
3784   //
3785   //   scalar.body:
3786   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3787   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3788   //     s2 = a[i]
3789   //     b[i] = s2 - s1
3790   //     br cond, scalar.body, ...
3791   //
3792   // In this example, s1 is a recurrence because it's value depends on the
3793   // previous iteration. In the first phase of vectorization, we created a
3794   // temporary value for s1. We now complete the vectorization and produce the
3795   // shorthand vector IR shown below (for VF = 4, UF = 1).
3796   //
3797   //   vector.ph:
3798   //     v_init = vector(..., ..., ..., a[-1])
3799   //     br vector.body
3800   //
3801   //   vector.body
3802   //     i = phi [0, vector.ph], [i+4, vector.body]
3803   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3804   //     v2 = a[i, i+1, i+2, i+3];
3805   //     v3 = vector(v1(3), v2(0, 1, 2))
3806   //     b[i, i+1, i+2, i+3] = v2 - v3
3807   //     br cond, vector.body, middle.block
3808   //
3809   //   middle.block:
3810   //     x = v2(3)
3811   //     br scalar.ph
3812   //
3813   //   scalar.ph:
3814   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3815   //     br scalar.body
3816   //
3817   // After execution completes the vector loop, we extract the next value of
3818   // the recurrence (x) to use as the initial value in the scalar loop.
3819 
3820   // Get the original loop preheader and single loop latch.
3821   auto *Preheader = OrigLoop->getLoopPreheader();
3822   auto *Latch = OrigLoop->getLoopLatch();
3823 
3824   // Get the initial and previous values of the scalar recurrence.
3825   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3826   auto *Previous = Phi->getIncomingValueForBlock(Latch);
3827 
3828   // Create a vector from the initial value.
3829   auto *VectorInit = ScalarInit;
3830   if (VF > 1) {
3831     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3832     VectorInit = Builder.CreateInsertElement(
3833         UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
3834         Builder.getInt32(VF - 1), "vector.recur.init");
3835   }
3836 
3837   // We constructed a temporary phi node in the first phase of vectorization.
3838   // This phi node will eventually be deleted.
3839   auto &PhiParts = getVectorValue(Phi);
3840   Builder.SetInsertPoint(cast<Instruction>(PhiParts[0]));
3841 
3842   // Create a phi node for the new recurrence. The current value will either be
3843   // the initial value inserted into a vector or loop-varying vector value.
3844   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
3845   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
3846 
3847   // Get the vectorized previous value. We ensured the previous values was an
3848   // instruction when detecting the recurrence.
3849   auto &PreviousParts = getVectorValue(Previous);
3850 
3851   // Set the insertion point to be after this instruction. We ensured the
3852   // previous value dominated all uses of the phi when detecting the
3853   // recurrence.
3854   Builder.SetInsertPoint(
3855       &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1])));
3856 
3857   // We will construct a vector for the recurrence by combining the values for
3858   // the current and previous iterations. This is the required shuffle mask.
3859   SmallVector<Constant *, 8> ShuffleMask(VF);
3860   ShuffleMask[0] = Builder.getInt32(VF - 1);
3861   for (unsigned I = 1; I < VF; ++I)
3862     ShuffleMask[I] = Builder.getInt32(I + VF - 1);
3863 
3864   // The vector from which to take the initial value for the current iteration
3865   // (actual or unrolled). Initially, this is the vector phi node.
3866   Value *Incoming = VecPhi;
3867 
3868   // Shuffle the current and previous vector and update the vector parts.
3869   for (unsigned Part = 0; Part < UF; ++Part) {
3870     auto *Shuffle =
3871         VF > 1
3872             ? Builder.CreateShuffleVector(Incoming, PreviousParts[Part],
3873                                           ConstantVector::get(ShuffleMask))
3874             : Incoming;
3875     PhiParts[Part]->replaceAllUsesWith(Shuffle);
3876     cast<Instruction>(PhiParts[Part])->eraseFromParent();
3877     PhiParts[Part] = Shuffle;
3878     Incoming = PreviousParts[Part];
3879   }
3880 
3881   // Fix the latch value of the new recurrence in the vector loop.
3882   VecPhi->addIncoming(Incoming,
3883                       LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3884 
3885   // Extract the last vector element in the middle block. This will be the
3886   // initial value for the recurrence when jumping to the scalar loop.
3887   auto *Extract = Incoming;
3888   if (VF > 1) {
3889     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3890     Extract = Builder.CreateExtractElement(Extract, Builder.getInt32(VF - 1),
3891                                            "vector.recur.extract");
3892   }
3893 
3894   // Fix the initial value of the original recurrence in the scalar loop.
3895   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3896   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3897   for (auto *BB : predecessors(LoopScalarPreHeader)) {
3898     auto *Incoming = BB == LoopMiddleBlock ? Extract : ScalarInit;
3899     Start->addIncoming(Incoming, BB);
3900   }
3901 
3902   Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
3903   Phi->setName("scalar.recur");
3904 
3905   // Finally, fix users of the recurrence outside the loop. The users will need
3906   // either the last value of the scalar recurrence or the last value of the
3907   // vector recurrence we extracted in the middle block. Since the loop is in
3908   // LCSSA form, we just need to find the phi node for the original scalar
3909   // recurrence in the exit block, and then add an edge for the middle block.
3910   for (auto &I : *LoopExitBlock) {
3911     auto *LCSSAPhi = dyn_cast<PHINode>(&I);
3912     if (!LCSSAPhi)
3913       break;
3914     if (LCSSAPhi->getIncomingValue(0) == Phi) {
3915       LCSSAPhi->addIncoming(Extract, LoopMiddleBlock);
3916       break;
3917     }
3918   }
3919 }
3920 
3921 void InnerLoopVectorizer::fixLCSSAPHIs() {
3922   for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
3923                             LEE = LoopExitBlock->end();
3924        LEI != LEE; ++LEI) {
3925     PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
3926     if (!LCSSAPhi)
3927       break;
3928     if (LCSSAPhi->getNumIncomingValues() == 1)
3929       LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()),
3930                             LoopMiddleBlock);
3931   }
3932 }
3933 
3934 InnerLoopVectorizer::VectorParts
3935 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
3936   assert(std::find(pred_begin(Dst), pred_end(Dst), Src) != pred_end(Dst) &&
3937          "Invalid edge");
3938 
3939   // Look for cached value.
3940   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
3941   EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge);
3942   if (ECEntryIt != MaskCache.end())
3943     return ECEntryIt->second;
3944 
3945   VectorParts SrcMask = createBlockInMask(Src);
3946 
3947   // The terminator has to be a branch inst!
3948   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
3949   assert(BI && "Unexpected terminator found");
3950 
3951   if (BI->isConditional()) {
3952     VectorParts EdgeMask = getVectorValue(BI->getCondition());
3953 
3954     if (BI->getSuccessor(0) != Dst)
3955       for (unsigned part = 0; part < UF; ++part)
3956         EdgeMask[part] = Builder.CreateNot(EdgeMask[part]);
3957 
3958     for (unsigned part = 0; part < UF; ++part)
3959       EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]);
3960 
3961     MaskCache[Edge] = EdgeMask;
3962     return EdgeMask;
3963   }
3964 
3965   MaskCache[Edge] = SrcMask;
3966   return SrcMask;
3967 }
3968 
3969 InnerLoopVectorizer::VectorParts
3970 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
3971   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
3972 
3973   // Loop incoming mask is all-one.
3974   if (OrigLoop->getHeader() == BB) {
3975     Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1);
3976     return getVectorValue(C);
3977   }
3978 
3979   // This is the block mask. We OR all incoming edges, and with zero.
3980   Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0);
3981   VectorParts BlockMask = getVectorValue(Zero);
3982 
3983   // For each pred:
3984   for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) {
3985     VectorParts EM = createEdgeMask(*it, BB);
3986     for (unsigned part = 0; part < UF; ++part)
3987       BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]);
3988   }
3989 
3990   return BlockMask;
3991 }
3992 
3993 void InnerLoopVectorizer::widenPHIInstruction(
3994     Instruction *PN, InnerLoopVectorizer::VectorParts &Entry, unsigned UF,
3995     unsigned VF, PhiVector *PV) {
3996   PHINode *P = cast<PHINode>(PN);
3997   // Handle recurrences.
3998   if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
3999     for (unsigned part = 0; part < UF; ++part) {
4000       // This is phase one of vectorizing PHIs.
4001       Type *VecTy =
4002           (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4003       Entry[part] = PHINode::Create(
4004           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4005     }
4006     PV->push_back(P);
4007     return;
4008   }
4009 
4010   setDebugLocFromInst(Builder, P);
4011   // Check for PHI nodes that are lowered to vector selects.
4012   if (P->getParent() != OrigLoop->getHeader()) {
4013     // We know that all PHIs in non-header blocks are converted into
4014     // selects, so we don't have to worry about the insertion order and we
4015     // can just use the builder.
4016     // At this point we generate the predication tree. There may be
4017     // duplications since this is a simple recursive scan, but future
4018     // optimizations will clean it up.
4019 
4020     unsigned NumIncoming = P->getNumIncomingValues();
4021 
4022     // Generate a sequence of selects of the form:
4023     // SELECT(Mask3, In3,
4024     //      SELECT(Mask2, In2,
4025     //                   ( ...)))
4026     for (unsigned In = 0; In < NumIncoming; In++) {
4027       VectorParts Cond =
4028           createEdgeMask(P->getIncomingBlock(In), P->getParent());
4029       VectorParts &In0 = getVectorValue(P->getIncomingValue(In));
4030 
4031       for (unsigned part = 0; part < UF; ++part) {
4032         // We might have single edge PHIs (blocks) - use an identity
4033         // 'select' for the first PHI operand.
4034         if (In == 0)
4035           Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In0[part]);
4036         else
4037           // Select between the current value and the previous incoming edge
4038           // based on the incoming mask.
4039           Entry[part] = Builder.CreateSelect(Cond[part], In0[part], Entry[part],
4040                                              "predphi");
4041       }
4042     }
4043     return;
4044   }
4045 
4046   // This PHINode must be an induction variable.
4047   // Make sure that we know about it.
4048   assert(Legal->getInductionVars()->count(P) && "Not an induction variable");
4049 
4050   InductionDescriptor II = Legal->getInductionVars()->lookup(P);
4051   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4052 
4053   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4054   // which can be found from the original scalar operations.
4055   switch (II.getKind()) {
4056   case InductionDescriptor::IK_NoInduction:
4057     llvm_unreachable("Unknown induction");
4058   case InductionDescriptor::IK_IntInduction: {
4059     assert(P->getType() == II.getStartValue()->getType() && "Types must match");
4060     // Handle other induction variables that are now based on the
4061     // canonical one.
4062     Value *V = Induction;
4063     if (P != OldInduction) {
4064       V = Builder.CreateSExtOrTrunc(Induction, P->getType());
4065       V = II.transform(Builder, V, PSE.getSE(), DL);
4066       V->setName("offset.idx");
4067     }
4068     Value *Broadcasted = getBroadcastInstrs(V);
4069     // After broadcasting the induction variable we need to make the vector
4070     // consecutive by adding 0, 1, 2, etc.
4071     for (unsigned part = 0; part < UF; ++part)
4072       Entry[part] = getStepVector(Broadcasted, VF * part, II.getStep());
4073     return;
4074   }
4075   case InductionDescriptor::IK_PtrInduction:
4076     // Handle the pointer induction variable case.
4077     assert(P->getType()->isPointerTy() && "Unexpected type.");
4078     // This is the normalized GEP that starts counting at zero.
4079     Value *PtrInd = Induction;
4080     PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
4081     // This is the vector of results. Notice that we don't generate
4082     // vector geps because scalar geps result in better code.
4083     for (unsigned part = 0; part < UF; ++part) {
4084       if (VF == 1) {
4085         int EltIndex = part;
4086         Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex);
4087         Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4088         Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
4089         SclrGep->setName("next.gep");
4090         Entry[part] = SclrGep;
4091         continue;
4092       }
4093 
4094       Value *VecVal = UndefValue::get(VectorType::get(P->getType(), VF));
4095       for (unsigned int i = 0; i < VF; ++i) {
4096         int EltIndex = i + part * VF;
4097         Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex);
4098         Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4099         Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
4100         SclrGep->setName("next.gep");
4101         VecVal = Builder.CreateInsertElement(VecVal, SclrGep,
4102                                              Builder.getInt32(i), "insert.gep");
4103       }
4104       Entry[part] = VecVal;
4105     }
4106     return;
4107   }
4108 }
4109 
4110 void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
4111   // For each instruction in the old loop.
4112   for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
4113     VectorParts &Entry = WidenMap.get(&*it);
4114 
4115     switch (it->getOpcode()) {
4116     case Instruction::Br:
4117       // Nothing to do for PHIs and BR, since we already took care of the
4118       // loop control flow instructions.
4119       continue;
4120     case Instruction::PHI: {
4121       // Vectorize PHINodes.
4122       widenPHIInstruction(&*it, Entry, UF, VF, PV);
4123       continue;
4124     } // End of PHI.
4125 
4126     case Instruction::Add:
4127     case Instruction::FAdd:
4128     case Instruction::Sub:
4129     case Instruction::FSub:
4130     case Instruction::Mul:
4131     case Instruction::FMul:
4132     case Instruction::UDiv:
4133     case Instruction::SDiv:
4134     case Instruction::FDiv:
4135     case Instruction::URem:
4136     case Instruction::SRem:
4137     case Instruction::FRem:
4138     case Instruction::Shl:
4139     case Instruction::LShr:
4140     case Instruction::AShr:
4141     case Instruction::And:
4142     case Instruction::Or:
4143     case Instruction::Xor: {
4144       // Just widen binops.
4145       BinaryOperator *BinOp = dyn_cast<BinaryOperator>(it);
4146       setDebugLocFromInst(Builder, BinOp);
4147       VectorParts &A = getVectorValue(it->getOperand(0));
4148       VectorParts &B = getVectorValue(it->getOperand(1));
4149 
4150       // Use this vector value for all users of the original instruction.
4151       for (unsigned Part = 0; Part < UF; ++Part) {
4152         Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]);
4153 
4154         if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
4155           VecOp->copyIRFlags(BinOp);
4156 
4157         Entry[Part] = V;
4158       }
4159 
4160       addMetadata(Entry, &*it);
4161       break;
4162     }
4163     case Instruction::Select: {
4164       // Widen selects.
4165       // If the selector is loop invariant we can create a select
4166       // instruction with a scalar condition. Otherwise, use vector-select.
4167       auto *SE = PSE.getSE();
4168       bool InvariantCond =
4169           SE->isLoopInvariant(PSE.getSCEV(it->getOperand(0)), OrigLoop);
4170       setDebugLocFromInst(Builder, &*it);
4171 
4172       // The condition can be loop invariant  but still defined inside the
4173       // loop. This means that we can't just use the original 'cond' value.
4174       // We have to take the 'vectorized' value and pick the first lane.
4175       // Instcombine will make this a no-op.
4176       VectorParts &Cond = getVectorValue(it->getOperand(0));
4177       VectorParts &Op0 = getVectorValue(it->getOperand(1));
4178       VectorParts &Op1 = getVectorValue(it->getOperand(2));
4179 
4180       Value *ScalarCond =
4181           (VF == 1)
4182               ? Cond[0]
4183               : Builder.CreateExtractElement(Cond[0], Builder.getInt32(0));
4184 
4185       for (unsigned Part = 0; Part < UF; ++Part) {
4186         Entry[Part] = Builder.CreateSelect(
4187             InvariantCond ? ScalarCond : Cond[Part], Op0[Part], Op1[Part]);
4188       }
4189 
4190       addMetadata(Entry, &*it);
4191       break;
4192     }
4193 
4194     case Instruction::ICmp:
4195     case Instruction::FCmp: {
4196       // Widen compares. Generate vector compares.
4197       bool FCmp = (it->getOpcode() == Instruction::FCmp);
4198       CmpInst *Cmp = dyn_cast<CmpInst>(it);
4199       setDebugLocFromInst(Builder, &*it);
4200       VectorParts &A = getVectorValue(it->getOperand(0));
4201       VectorParts &B = getVectorValue(it->getOperand(1));
4202       for (unsigned Part = 0; Part < UF; ++Part) {
4203         Value *C = nullptr;
4204         if (FCmp) {
4205           C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]);
4206           cast<FCmpInst>(C)->copyFastMathFlags(&*it);
4207         } else {
4208           C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]);
4209         }
4210         Entry[Part] = C;
4211       }
4212 
4213       addMetadata(Entry, &*it);
4214       break;
4215     }
4216 
4217     case Instruction::Store:
4218     case Instruction::Load:
4219       vectorizeMemoryInstruction(&*it);
4220       break;
4221     case Instruction::ZExt:
4222     case Instruction::SExt:
4223     case Instruction::FPToUI:
4224     case Instruction::FPToSI:
4225     case Instruction::FPExt:
4226     case Instruction::PtrToInt:
4227     case Instruction::IntToPtr:
4228     case Instruction::SIToFP:
4229     case Instruction::UIToFP:
4230     case Instruction::Trunc:
4231     case Instruction::FPTrunc:
4232     case Instruction::BitCast: {
4233       CastInst *CI = dyn_cast<CastInst>(it);
4234       setDebugLocFromInst(Builder, &*it);
4235       /// Optimize the special case where the source is a constant integer
4236       /// induction variable. Notice that we can only optimize the 'trunc' case
4237       /// because: a. FP conversions lose precision, b. sext/zext may wrap,
4238       /// c. other casts depend on pointer size.
4239 
4240       if (CI->getOperand(0) == OldInduction &&
4241           it->getOpcode() == Instruction::Trunc) {
4242         InductionDescriptor II =
4243           Legal->getInductionVars()->lookup(OldInduction);
4244         if (auto StepValue = II.getConstIntStepValue()) {
4245           StepValue = ConstantInt::getSigned(cast<IntegerType>(CI->getType()),
4246                                              StepValue->getSExtValue());
4247           Value *ScalarCast = Builder.CreateCast(CI->getOpcode(), Induction,
4248                                                  CI->getType());
4249           Value *Broadcasted = getBroadcastInstrs(ScalarCast);
4250           for (unsigned Part = 0; Part < UF; ++Part)
4251             Entry[Part] = getStepVector(Broadcasted, VF * Part, StepValue);
4252           addMetadata(Entry, &*it);
4253           break;
4254         }
4255       }
4256       /// Vectorize casts.
4257       Type *DestTy =
4258           (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
4259 
4260       VectorParts &A = getVectorValue(it->getOperand(0));
4261       for (unsigned Part = 0; Part < UF; ++Part)
4262         Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy);
4263       addMetadata(Entry, &*it);
4264       break;
4265     }
4266 
4267     case Instruction::Call: {
4268       // Ignore dbg intrinsics.
4269       if (isa<DbgInfoIntrinsic>(it))
4270         break;
4271       setDebugLocFromInst(Builder, &*it);
4272 
4273       Module *M = BB->getParent()->getParent();
4274       CallInst *CI = cast<CallInst>(it);
4275 
4276       StringRef FnName = CI->getCalledFunction()->getName();
4277       Function *F = CI->getCalledFunction();
4278       Type *RetTy = ToVectorTy(CI->getType(), VF);
4279       SmallVector<Type *, 4> Tys;
4280       for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i)
4281         Tys.push_back(ToVectorTy(CI->getArgOperand(i)->getType(), VF));
4282 
4283       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4284       if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
4285                  ID == Intrinsic::lifetime_start)) {
4286         scalarizeInstruction(&*it);
4287         break;
4288       }
4289       // The flag shows whether we use Intrinsic or a usual Call for vectorized
4290       // version of the instruction.
4291       // Is it beneficial to perform intrinsic call compared to lib call?
4292       bool NeedToScalarize;
4293       unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
4294       bool UseVectorIntrinsic =
4295           ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
4296       if (!UseVectorIntrinsic && NeedToScalarize) {
4297         scalarizeInstruction(&*it);
4298         break;
4299       }
4300 
4301       for (unsigned Part = 0; Part < UF; ++Part) {
4302         SmallVector<Value *, 4> Args;
4303         for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
4304           Value *Arg = CI->getArgOperand(i);
4305           // Some intrinsics have a scalar argument - don't replace it with a
4306           // vector.
4307           if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) {
4308             VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i));
4309             Arg = VectorArg[Part];
4310           }
4311           Args.push_back(Arg);
4312         }
4313 
4314         Function *VectorF;
4315         if (UseVectorIntrinsic) {
4316           // Use vector version of the intrinsic.
4317           Type *TysForDecl[] = {CI->getType()};
4318           if (VF > 1)
4319             TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4320           VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4321         } else {
4322           // Use vector version of the library call.
4323           StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
4324           assert(!VFnName.empty() && "Vector function name is empty.");
4325           VectorF = M->getFunction(VFnName);
4326           if (!VectorF) {
4327             // Generate a declaration
4328             FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
4329             VectorF =
4330                 Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
4331             VectorF->copyAttributesFrom(F);
4332           }
4333         }
4334         assert(VectorF && "Can't create vector function.");
4335 
4336         SmallVector<OperandBundleDef, 1> OpBundles;
4337         CI->getOperandBundlesAsDefs(OpBundles);
4338         CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4339 
4340         if (isa<FPMathOperator>(V))
4341           V->copyFastMathFlags(CI);
4342 
4343         Entry[Part] = V;
4344       }
4345 
4346       addMetadata(Entry, &*it);
4347       break;
4348     }
4349 
4350     default:
4351       // All other instructions are unsupported. Scalarize them.
4352       scalarizeInstruction(&*it);
4353       break;
4354     } // end of switch.
4355   }   // end of for_each instr.
4356 }
4357 
4358 void InnerLoopVectorizer::updateAnalysis() {
4359   // Forget the original basic block.
4360   PSE.getSE()->forgetLoop(OrigLoop);
4361 
4362   // Update the dominator tree information.
4363   assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
4364          "Entry does not dominate exit.");
4365 
4366   // We don't predicate stores by this point, so the vector body should be a
4367   // single loop.
4368   DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader);
4369 
4370   DT->addNewBlock(LoopMiddleBlock, LoopVectorBody);
4371   DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
4372   DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
4373   DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
4374 
4375   DEBUG(DT->verifyDomTree());
4376 }
4377 
4378 /// \brief Check whether it is safe to if-convert this phi node.
4379 ///
4380 /// Phi nodes with constant expressions that can trap are not safe to if
4381 /// convert.
4382 static bool canIfConvertPHINodes(BasicBlock *BB) {
4383   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
4384     PHINode *Phi = dyn_cast<PHINode>(I);
4385     if (!Phi)
4386       return true;
4387     for (unsigned p = 0, e = Phi->getNumIncomingValues(); p != e; ++p)
4388       if (Constant *C = dyn_cast<Constant>(Phi->getIncomingValue(p)))
4389         if (C->canTrap())
4390           return false;
4391   }
4392   return true;
4393 }
4394 
4395 bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
4396   if (!EnableIfConversion) {
4397     emitAnalysis(VectorizationReport() << "if-conversion is disabled");
4398     return false;
4399   }
4400 
4401   assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");
4402 
4403   // A list of pointers that we can safely read and write to.
4404   SmallPtrSet<Value *, 8> SafePointes;
4405 
4406   // Collect safe addresses.
4407   for (Loop::block_iterator BI = TheLoop->block_begin(),
4408                             BE = TheLoop->block_end();
4409        BI != BE; ++BI) {
4410     BasicBlock *BB = *BI;
4411 
4412     if (blockNeedsPredication(BB))
4413       continue;
4414 
4415     for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
4416       if (LoadInst *LI = dyn_cast<LoadInst>(I))
4417         SafePointes.insert(LI->getPointerOperand());
4418       else if (StoreInst *SI = dyn_cast<StoreInst>(I))
4419         SafePointes.insert(SI->getPointerOperand());
4420     }
4421   }
4422 
4423   // Collect the blocks that need predication.
4424   BasicBlock *Header = TheLoop->getHeader();
4425   for (Loop::block_iterator BI = TheLoop->block_begin(),
4426                             BE = TheLoop->block_end();
4427        BI != BE; ++BI) {
4428     BasicBlock *BB = *BI;
4429 
4430     // We don't support switch statements inside loops.
4431     if (!isa<BranchInst>(BB->getTerminator())) {
4432       emitAnalysis(VectorizationReport(BB->getTerminator())
4433                    << "loop contains a switch statement");
4434       return false;
4435     }
4436 
4437     // We must be able to predicate all blocks that need to be predicated.
4438     if (blockNeedsPredication(BB)) {
4439       if (!blockCanBePredicated(BB, SafePointes)) {
4440         emitAnalysis(VectorizationReport(BB->getTerminator())
4441                      << "control flow cannot be substituted for a select");
4442         return false;
4443       }
4444     } else if (BB != Header && !canIfConvertPHINodes(BB)) {
4445       emitAnalysis(VectorizationReport(BB->getTerminator())
4446                    << "control flow cannot be substituted for a select");
4447       return false;
4448     }
4449   }
4450 
4451   // We can if-convert this loop.
4452   return true;
4453 }
4454 
4455 bool LoopVectorizationLegality::canVectorize() {
4456   // We must have a loop in canonical form. Loops with indirectbr in them cannot
4457   // be canonicalized.
4458   if (!TheLoop->getLoopPreheader()) {
4459     emitAnalysis(VectorizationReport()
4460                  << "loop control flow is not understood by vectorizer");
4461     return false;
4462   }
4463 
4464   // We can only vectorize innermost loops.
4465   if (!TheLoop->empty()) {
4466     emitAnalysis(VectorizationReport() << "loop is not the innermost loop");
4467     return false;
4468   }
4469 
4470   // We must have a single backedge.
4471   if (TheLoop->getNumBackEdges() != 1) {
4472     emitAnalysis(VectorizationReport()
4473                  << "loop control flow is not understood by vectorizer");
4474     return false;
4475   }
4476 
4477   // We must have a single exiting block.
4478   if (!TheLoop->getExitingBlock()) {
4479     emitAnalysis(VectorizationReport()
4480                  << "loop control flow is not understood by vectorizer");
4481     return false;
4482   }
4483 
4484   // We only handle bottom-tested loops, i.e. loop in which the condition is
4485   // checked at the end of each iteration. With that we can assume that all
4486   // instructions in the loop are executed the same number of times.
4487   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
4488     emitAnalysis(VectorizationReport()
4489                  << "loop control flow is not understood by vectorizer");
4490     return false;
4491   }
4492 
4493   // We need to have a loop header.
4494   DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
4495                << '\n');
4496 
4497   // Check if we can if-convert non-single-bb loops.
4498   unsigned NumBlocks = TheLoop->getNumBlocks();
4499   if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
4500     DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
4501     return false;
4502   }
4503 
4504   // ScalarEvolution needs to be able to find the exit count.
4505   const SCEV *ExitCount = PSE.getBackedgeTakenCount();
4506   if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
4507     emitAnalysis(VectorizationReport()
4508                  << "could not determine number of loop iterations");
4509     DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
4510     return false;
4511   }
4512 
4513   // Check if we can vectorize the instructions and CFG in this loop.
4514   if (!canVectorizeInstrs()) {
4515     DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
4516     return false;
4517   }
4518 
4519   // Go over each instruction and look at memory deps.
4520   if (!canVectorizeMemory()) {
4521     DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
4522     return false;
4523   }
4524 
4525   // Collect all of the variables that remain uniform after vectorization.
4526   collectLoopUniforms();
4527 
4528   DEBUG(dbgs() << "LV: We can vectorize this loop"
4529                << (LAI->getRuntimePointerChecking()->Need
4530                        ? " (with a runtime bound check)"
4531                        : "")
4532                << "!\n");
4533 
4534   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
4535 
4536   // If an override option has been passed in for interleaved accesses, use it.
4537   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
4538     UseInterleaved = EnableInterleavedMemAccesses;
4539 
4540   // Analyze interleaved memory accesses.
4541   if (UseInterleaved)
4542     InterleaveInfo.analyzeInterleaving(Strides);
4543 
4544   unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
4545   if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
4546     SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;
4547 
4548   if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
4549     emitAnalysis(VectorizationReport()
4550                  << "Too many SCEV assumptions need to be made and checked "
4551                  << "at runtime");
4552     DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
4553     return false;
4554   }
4555 
4556   // Okay! We can vectorize. At this point we don't have any other mem analysis
4557   // which may limit our maximum vectorization factor, so just return true with
4558   // no restrictions.
4559   return true;
4560 }
4561 
4562 static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
4563   if (Ty->isPointerTy())
4564     return DL.getIntPtrType(Ty);
4565 
4566   // It is possible that char's or short's overflow when we ask for the loop's
4567   // trip count, work around this by changing the type size.
4568   if (Ty->getScalarSizeInBits() < 32)
4569     return Type::getInt32Ty(Ty->getContext());
4570 
4571   return Ty;
4572 }
4573 
4574 static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
4575   Ty0 = convertPointerToIntegerType(DL, Ty0);
4576   Ty1 = convertPointerToIntegerType(DL, Ty1);
4577   if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
4578     return Ty0;
4579   return Ty1;
4580 }
4581 
4582 /// \brief Check that the instruction has outside loop users and is not an
4583 /// identified reduction variable.
4584 static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
4585                                SmallPtrSetImpl<Value *> &Reductions) {
4586   // Reduction instructions are allowed to have exit users. All other
4587   // instructions must not have external users.
4588   if (!Reductions.count(Inst))
4589     // Check that all of the users of the loop are inside the BB.
4590     for (User *U : Inst->users()) {
4591       Instruction *UI = cast<Instruction>(U);
4592       // This user may be a reduction exit value.
4593       if (!TheLoop->contains(UI)) {
4594         DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
4595         return true;
4596       }
4597     }
4598   return false;
4599 }
4600 
4601 bool LoopVectorizationLegality::addInductionPhi(PHINode *Phi,
4602                                                 InductionDescriptor ID) {
4603   Inductions[Phi] = ID;
4604   Type *PhiTy = Phi->getType();
4605   const DataLayout &DL = Phi->getModule()->getDataLayout();
4606 
4607   // Get the widest type.
4608   if (!WidestIndTy)
4609     WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
4610   else
4611     WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
4612 
4613   // Int inductions are special because we only allow one IV.
4614   if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
4615       ID.getConstIntStepValue() &&
4616       ID.getConstIntStepValue()->isOne() &&
4617       isa<Constant>(ID.getStartValue()) &&
4618       cast<Constant>(ID.getStartValue())->isNullValue()) {
4619 
4620     // Use the phi node with the widest type as induction. Use the last
4621     // one if there are multiple (no good reason for doing this other
4622     // than it is expedient). We've checked that it begins at zero and
4623     // steps by one, so this is a canonical induction variable.
4624     if (!Induction || PhiTy == WidestIndTy)
4625       Induction = Phi;
4626   }
4627 
4628   DEBUG(dbgs() << "LV: Found an induction variable.\n");
4629 
4630   // Until we explicitly handle the case of an induction variable with
4631   // an outside loop user we have to give up vectorizing this loop.
4632   if (hasOutsideLoopUser(TheLoop, Phi, AllowedExit)) {
4633     emitAnalysis(VectorizationReport(Phi) <<
4634                  "use of induction value outside of the "
4635                  "loop is not handled by vectorizer");
4636     return false;
4637   }
4638 
4639   return true;
4640 }
4641 
4642 bool LoopVectorizationLegality::canVectorizeInstrs() {
4643   BasicBlock *Header = TheLoop->getHeader();
4644 
4645   // Look for the attribute signaling the absence of NaNs.
4646   Function &F = *Header->getParent();
4647   HasFunNoNaNAttr =
4648       F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
4649 
4650   // For each block in the loop.
4651   for (Loop::block_iterator bb = TheLoop->block_begin(),
4652                             be = TheLoop->block_end();
4653        bb != be; ++bb) {
4654 
4655     // Scan the instructions in the block and look for hazards.
4656     for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
4657          ++it) {
4658 
4659       if (PHINode *Phi = dyn_cast<PHINode>(it)) {
4660         Type *PhiTy = Phi->getType();
4661         // Check that this PHI type is allowed.
4662         if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
4663             !PhiTy->isPointerTy()) {
4664           emitAnalysis(VectorizationReport(&*it)
4665                        << "loop control flow is not understood by vectorizer");
4666           DEBUG(dbgs() << "LV: Found an non-int non-pointer PHI.\n");
4667           return false;
4668         }
4669 
4670         // If this PHINode is not in the header block, then we know that we
4671         // can convert it to select during if-conversion. No need to check if
4672         // the PHIs in this block are induction or reduction variables.
4673         if (*bb != Header) {
4674           // Check that this instruction has no outside users or is an
4675           // identified reduction value with an outside user.
4676           if (!hasOutsideLoopUser(TheLoop, &*it, AllowedExit))
4677             continue;
4678           emitAnalysis(VectorizationReport(&*it)
4679                        << "value could not be identified as "
4680                           "an induction or reduction variable");
4681           return false;
4682         }
4683 
4684         // We only allow if-converted PHIs with exactly two incoming values.
4685         if (Phi->getNumIncomingValues() != 2) {
4686           emitAnalysis(VectorizationReport(&*it)
4687                        << "control flow not understood by vectorizer");
4688           DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
4689           return false;
4690         }
4691 
4692         RecurrenceDescriptor RedDes;
4693         if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
4694           if (RedDes.hasUnsafeAlgebra())
4695             Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
4696           AllowedExit.insert(RedDes.getLoopExitInstr());
4697           Reductions[Phi] = RedDes;
4698           continue;
4699         }
4700 
4701         InductionDescriptor ID;
4702         if (InductionDescriptor::isInductionPHI(Phi, PSE, ID)) {
4703           if (!addInductionPhi(Phi, ID))
4704             return false;
4705           continue;
4706         }
4707 
4708         if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) {
4709           FirstOrderRecurrences.insert(Phi);
4710           continue;
4711         }
4712 
4713         // As a last resort, coerce the PHI to a AddRec expression
4714         // and re-try classifying it a an induction PHI.
4715         if (InductionDescriptor::isInductionPHI(Phi, PSE, ID, true)) {
4716           if (!addInductionPhi(Phi, ID))
4717             return false;
4718           continue;
4719         }
4720 
4721         emitAnalysis(VectorizationReport(&*it)
4722                      << "value that could not be identified as "
4723                         "reduction is used outside the loop");
4724         DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
4725         return false;
4726       } // end of PHI handling
4727 
4728       // We handle calls that:
4729       //   * Are debug info intrinsics.
4730       //   * Have a mapping to an IR intrinsic.
4731       //   * Have a vector version available.
4732       CallInst *CI = dyn_cast<CallInst>(it);
4733       if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
4734           !isa<DbgInfoIntrinsic>(CI) &&
4735           !(CI->getCalledFunction() && TLI &&
4736             TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
4737         emitAnalysis(VectorizationReport(&*it)
4738                      << "call instruction cannot be vectorized");
4739         DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
4740         return false;
4741       }
4742 
4743       // Intrinsics such as powi,cttz and ctlz are legal to vectorize if the
4744       // second argument is the same (i.e. loop invariant)
4745       if (CI && hasVectorInstrinsicScalarOpd(
4746                     getVectorIntrinsicIDForCall(CI, TLI), 1)) {
4747         auto *SE = PSE.getSE();
4748         if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
4749           emitAnalysis(VectorizationReport(&*it)
4750                        << "intrinsic instruction cannot be vectorized");
4751           DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
4752           return false;
4753         }
4754       }
4755 
4756       // Check that the instruction return type is vectorizable.
4757       // Also, we can't vectorize extractelement instructions.
4758       if ((!VectorType::isValidElementType(it->getType()) &&
4759            !it->getType()->isVoidTy()) ||
4760           isa<ExtractElementInst>(it)) {
4761         emitAnalysis(VectorizationReport(&*it)
4762                      << "instruction return type cannot be vectorized");
4763         DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
4764         return false;
4765       }
4766 
4767       // Check that the stored type is vectorizable.
4768       if (StoreInst *ST = dyn_cast<StoreInst>(it)) {
4769         Type *T = ST->getValueOperand()->getType();
4770         if (!VectorType::isValidElementType(T)) {
4771           emitAnalysis(VectorizationReport(ST)
4772                        << "store instruction cannot be vectorized");
4773           return false;
4774         }
4775         if (EnableMemAccessVersioning)
4776           collectStridedAccess(ST);
4777 
4778       } else if (LoadInst *LI = dyn_cast<LoadInst>(it)) {
4779         if (EnableMemAccessVersioning)
4780           collectStridedAccess(LI);
4781 
4782         // FP instructions can allow unsafe algebra, thus vectorizable by
4783         // non-IEEE-754 compliant SIMD units.
4784         // This applies to floating-point math operations and calls, not memory
4785         // operations, shuffles, or casts, as they don't change precision or
4786         // semantics.
4787       } else if (it->getType()->isFloatingPointTy() &&
4788                  (CI || it->isBinaryOp()) && !it->hasUnsafeAlgebra()) {
4789         DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
4790         Hints->setPotentiallyUnsafe();
4791       }
4792 
4793       // Reduction instructions are allowed to have exit users.
4794       // All other instructions must not have external users.
4795       if (hasOutsideLoopUser(TheLoop, &*it, AllowedExit)) {
4796         emitAnalysis(VectorizationReport(&*it)
4797                      << "value cannot be used outside the loop");
4798         return false;
4799       }
4800 
4801     } // next instr.
4802   }
4803 
4804   if (!Induction) {
4805     DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
4806     if (Inductions.empty()) {
4807       emitAnalysis(VectorizationReport()
4808                    << "loop induction variable could not be identified");
4809       return false;
4810     }
4811   }
4812 
4813   // Now we know the widest induction type, check if our found induction
4814   // is the same size. If it's not, unset it here and InnerLoopVectorizer
4815   // will create another.
4816   if (Induction && WidestIndTy != Induction->getType())
4817     Induction = nullptr;
4818 
4819   return true;
4820 }
4821 
4822 void LoopVectorizationLegality::collectStridedAccess(Value *MemAccess) {
4823   Value *Ptr = nullptr;
4824   if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
4825     Ptr = LI->getPointerOperand();
4826   else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
4827     Ptr = SI->getPointerOperand();
4828   else
4829     return;
4830 
4831   Value *Stride = getStrideFromPointer(Ptr, PSE.getSE(), TheLoop);
4832   if (!Stride)
4833     return;
4834 
4835   DEBUG(dbgs() << "LV: Found a strided access that we can version");
4836   DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
4837   Strides[Ptr] = Stride;
4838   StrideSet.insert(Stride);
4839 }
4840 
4841 void LoopVectorizationLegality::collectLoopUniforms() {
4842   // We now know that the loop is vectorizable!
4843   // Collect variables that will remain uniform after vectorization.
4844   std::vector<Value *> Worklist;
4845   BasicBlock *Latch = TheLoop->getLoopLatch();
4846 
4847   // Start with the conditional branch and walk up the block.
4848   Worklist.push_back(Latch->getTerminator()->getOperand(0));
4849 
4850   // Also add all consecutive pointer values; these values will be uniform
4851   // after vectorization (and subsequent cleanup) and, until revectorization is
4852   // supported, all dependencies must also be uniform.
4853   for (Loop::block_iterator B = TheLoop->block_begin(),
4854                             BE = TheLoop->block_end();
4855        B != BE; ++B)
4856     for (BasicBlock::iterator I = (*B)->begin(), IE = (*B)->end(); I != IE; ++I)
4857       if (I->getType()->isPointerTy() && isConsecutivePtr(&*I))
4858         Worklist.insert(Worklist.end(), I->op_begin(), I->op_end());
4859 
4860   while (!Worklist.empty()) {
4861     Instruction *I = dyn_cast<Instruction>(Worklist.back());
4862     Worklist.pop_back();
4863 
4864     // Look at instructions inside this loop.
4865     // Stop when reaching PHI nodes.
4866     // TODO: we need to follow values all over the loop, not only in this block.
4867     if (!I || !TheLoop->contains(I) || isa<PHINode>(I))
4868       continue;
4869 
4870     // This is a known uniform.
4871     Uniforms.insert(I);
4872 
4873     // Insert all operands.
4874     Worklist.insert(Worklist.end(), I->op_begin(), I->op_end());
4875   }
4876 }
4877 
4878 bool LoopVectorizationLegality::canVectorizeMemory() {
4879   LAI = &LAA->getInfo(TheLoop, Strides);
4880   auto &OptionalReport = LAI->getReport();
4881   if (OptionalReport)
4882     emitAnalysis(VectorizationReport(*OptionalReport));
4883   if (!LAI->canVectorizeMemory())
4884     return false;
4885 
4886   if (LAI->hasStoreToLoopInvariantAddress()) {
4887     emitAnalysis(
4888         VectorizationReport()
4889         << "write to a loop invariant address could not be vectorized");
4890     DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
4891     return false;
4892   }
4893 
4894   Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
4895   PSE.addPredicate(LAI->PSE.getUnionPredicate());
4896 
4897   return true;
4898 }
4899 
4900 bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
4901   Value *In0 = const_cast<Value *>(V);
4902   PHINode *PN = dyn_cast_or_null<PHINode>(In0);
4903   if (!PN)
4904     return false;
4905 
4906   return Inductions.count(PN);
4907 }
4908 
4909 bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) {
4910   return FirstOrderRecurrences.count(Phi);
4911 }
4912 
4913 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
4914   return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
4915 }
4916 
4917 bool LoopVectorizationLegality::blockCanBePredicated(
4918     BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) {
4919   const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
4920 
4921   for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
4922     // Check that we don't have a constant expression that can trap as operand.
4923     for (Instruction::op_iterator OI = it->op_begin(), OE = it->op_end();
4924          OI != OE; ++OI) {
4925       if (Constant *C = dyn_cast<Constant>(*OI))
4926         if (C->canTrap())
4927           return false;
4928     }
4929     // We might be able to hoist the load.
4930     if (it->mayReadFromMemory()) {
4931       LoadInst *LI = dyn_cast<LoadInst>(it);
4932       if (!LI)
4933         return false;
4934       if (!SafePtrs.count(LI->getPointerOperand())) {
4935         if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) ||
4936             isLegalMaskedGather(LI->getType())) {
4937           MaskedOp.insert(LI);
4938           continue;
4939         }
4940         // !llvm.mem.parallel_loop_access implies if-conversion safety.
4941         if (IsAnnotatedParallel)
4942           continue;
4943         return false;
4944       }
4945     }
4946 
4947     // We don't predicate stores at the moment.
4948     if (it->mayWriteToMemory()) {
4949       StoreInst *SI = dyn_cast<StoreInst>(it);
4950       // We only support predication of stores in basic blocks with one
4951       // predecessor.
4952       if (!SI)
4953         return false;
4954 
4955       // Build a masked store if it is legal for the target.
4956       if (isLegalMaskedStore(SI->getValueOperand()->getType(),
4957                              SI->getPointerOperand()) ||
4958           isLegalMaskedScatter(SI->getValueOperand()->getType())) {
4959         MaskedOp.insert(SI);
4960         continue;
4961       }
4962 
4963       bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0);
4964       bool isSinglePredecessor = SI->getParent()->getSinglePredecessor();
4965 
4966       if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr ||
4967           !isSinglePredecessor)
4968         return false;
4969     }
4970     if (it->mayThrow())
4971       return false;
4972 
4973     // The instructions below can trap.
4974     switch (it->getOpcode()) {
4975     default:
4976       continue;
4977     case Instruction::UDiv:
4978     case Instruction::SDiv:
4979     case Instruction::URem:
4980     case Instruction::SRem:
4981       return false;
4982     }
4983   }
4984 
4985   return true;
4986 }
4987 
4988 void InterleavedAccessInfo::collectConstStridedAccesses(
4989     MapVector<Instruction *, StrideDescriptor> &StrideAccesses,
4990     const ValueToValueMap &Strides) {
4991   // Holds load/store instructions in program order.
4992   SmallVector<Instruction *, 16> AccessList;
4993 
4994   for (auto *BB : TheLoop->getBlocks()) {
4995     bool IsPred = LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
4996 
4997     for (auto &I : *BB) {
4998       if (!isa<LoadInst>(&I) && !isa<StoreInst>(&I))
4999         continue;
5000       // FIXME: Currently we can't handle mixed accesses and predicated accesses
5001       if (IsPred)
5002         return;
5003 
5004       AccessList.push_back(&I);
5005     }
5006   }
5007 
5008   if (AccessList.empty())
5009     return;
5010 
5011   auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();
5012   for (auto I : AccessList) {
5013     LoadInst *LI = dyn_cast<LoadInst>(I);
5014     StoreInst *SI = dyn_cast<StoreInst>(I);
5015 
5016     Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();
5017     int Stride = getPtrStride(PSE, Ptr, TheLoop, Strides);
5018 
5019     // The factor of the corresponding interleave group.
5020     unsigned Factor = std::abs(Stride);
5021 
5022     // Ignore the access if the factor is too small or too large.
5023     if (Factor < 2 || Factor > MaxInterleaveGroupFactor)
5024       continue;
5025 
5026     const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
5027     PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
5028     unsigned Size = DL.getTypeAllocSize(PtrTy->getElementType());
5029 
5030     // An alignment of 0 means target ABI alignment.
5031     unsigned Align = LI ? LI->getAlignment() : SI->getAlignment();
5032     if (!Align)
5033       Align = DL.getABITypeAlignment(PtrTy->getElementType());
5034 
5035     StrideAccesses[I] = StrideDescriptor(Stride, Scev, Size, Align);
5036   }
5037 }
5038 
5039 // Analyze interleaved accesses and collect them into interleave groups.
5040 //
5041 // Notice that the vectorization on interleaved groups will change instruction
5042 // orders and may break dependences. But the memory dependence check guarantees
5043 // that there is no overlap between two pointers of different strides, element
5044 // sizes or underlying bases.
5045 //
5046 // For pointers sharing the same stride, element size and underlying base, no
5047 // need to worry about Read-After-Write dependences and Write-After-Read
5048 // dependences.
5049 //
5050 // E.g. The RAW dependence:  A[i] = a;
5051 //                           b = A[i];
5052 // This won't exist as it is a store-load forwarding conflict, which has
5053 // already been checked and forbidden in the dependence check.
5054 //
5055 // E.g. The WAR dependence:  a = A[i];  // (1)
5056 //                           A[i] = b;  // (2)
5057 // The store group of (2) is always inserted at or below (2), and the load group
5058 // of (1) is always inserted at or above (1). The dependence is safe.
5059 void InterleavedAccessInfo::analyzeInterleaving(
5060     const ValueToValueMap &Strides) {
5061   DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
5062 
5063   // Holds all the stride accesses.
5064   MapVector<Instruction *, StrideDescriptor> StrideAccesses;
5065   collectConstStridedAccesses(StrideAccesses, Strides);
5066 
5067   if (StrideAccesses.empty())
5068     return;
5069 
5070   // Holds all interleaved store groups temporarily.
5071   SmallSetVector<InterleaveGroup *, 4> StoreGroups;
5072   // Holds all interleaved load groups temporarily.
5073   SmallSetVector<InterleaveGroup *, 4> LoadGroups;
5074 
5075   // Search the load-load/write-write pair B-A in bottom-up order and try to
5076   // insert B into the interleave group of A according to 3 rules:
5077   //   1. A and B have the same stride.
5078   //   2. A and B have the same memory object size.
5079   //   3. B belongs to the group according to the distance.
5080   //
5081   // The bottom-up order can avoid breaking the Write-After-Write dependences
5082   // between two pointers of the same base.
5083   // E.g.  A[i]   = a;   (1)
5084   //       A[i]   = b;   (2)
5085   //       A[i+1] = c    (3)
5086   // We form the group (2)+(3) in front, so (1) has to form groups with accesses
5087   // above (1), which guarantees that (1) is always above (2).
5088   for (auto I = StrideAccesses.rbegin(), E = StrideAccesses.rend(); I != E;
5089        ++I) {
5090     Instruction *A = I->first;
5091     StrideDescriptor DesA = I->second;
5092 
5093     InterleaveGroup *Group = getInterleaveGroup(A);
5094     if (!Group) {
5095       DEBUG(dbgs() << "LV: Creating an interleave group with:" << *A << '\n');
5096       Group = createInterleaveGroup(A, DesA.Stride, DesA.Align);
5097     }
5098 
5099     if (A->mayWriteToMemory())
5100       StoreGroups.insert(Group);
5101     else
5102       LoadGroups.insert(Group);
5103 
5104     for (auto II = std::next(I); II != E; ++II) {
5105       Instruction *B = II->first;
5106       StrideDescriptor DesB = II->second;
5107 
5108       // Ignore if B is already in a group or B is a different memory operation.
5109       if (isInterleaved(B) || A->mayReadFromMemory() != B->mayReadFromMemory())
5110         continue;
5111 
5112       // Check the rule 1 and 2.
5113       if (DesB.Stride != DesA.Stride || DesB.Size != DesA.Size)
5114         continue;
5115 
5116       // Calculate the distance and prepare for the rule 3.
5117       const SCEVConstant *DistToA = dyn_cast<SCEVConstant>(
5118           PSE.getSE()->getMinusSCEV(DesB.Scev, DesA.Scev));
5119       if (!DistToA)
5120         continue;
5121 
5122       int DistanceToA = DistToA->getAPInt().getSExtValue();
5123 
5124       // Skip if the distance is not multiple of size as they are not in the
5125       // same group.
5126       if (DistanceToA % static_cast<int>(DesA.Size))
5127         continue;
5128 
5129       // The index of B is the index of A plus the related index to A.
5130       int IndexB =
5131           Group->getIndex(A) + DistanceToA / static_cast<int>(DesA.Size);
5132 
5133       // Try to insert B into the group.
5134       if (Group->insertMember(B, IndexB, DesB.Align)) {
5135         DEBUG(dbgs() << "LV: Inserted:" << *B << '\n'
5136                      << "    into the interleave group with" << *A << '\n');
5137         InterleaveGroupMap[B] = Group;
5138 
5139         // Set the first load in program order as the insert position.
5140         if (B->mayReadFromMemory())
5141           Group->setInsertPos(B);
5142       }
5143     } // Iteration on instruction B
5144   }   // Iteration on instruction A
5145 
5146   // Remove interleaved store groups with gaps.
5147   for (InterleaveGroup *Group : StoreGroups)
5148     if (Group->getNumMembers() != Group->getFactor())
5149       releaseGroup(Group);
5150 
5151   // If there is a non-reversed interleaved load group with gaps, we will need
5152   // to execute at least one scalar epilogue iteration. This will ensure that
5153   // we don't speculatively access memory out-of-bounds. Note that we only need
5154   // to look for a member at index factor - 1, since every group must have a
5155   // member at index zero.
5156   for (InterleaveGroup *Group : LoadGroups)
5157     if (!Group->getMember(Group->getFactor() - 1)) {
5158       if (Group->isReverse()) {
5159         releaseGroup(Group);
5160       } else {
5161         DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
5162         RequiresScalarEpilogue = true;
5163       }
5164     }
5165 }
5166 
5167 LoopVectorizationCostModel::VectorizationFactor
5168 LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
5169   // Width 1 means no vectorize
5170   VectorizationFactor Factor = {1U, 0U};
5171   if (OptForSize && Legal->getRuntimePointerChecking()->Need) {
5172     emitAnalysis(
5173         VectorizationReport()
5174         << "runtime pointer checks needed. Enable vectorization of this "
5175            "loop with '#pragma clang loop vectorize(enable)' when "
5176            "compiling with -Os/-Oz");
5177     DEBUG(dbgs()
5178           << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
5179     return Factor;
5180   }
5181 
5182   if (!EnableCondStoresVectorization && Legal->getNumPredStores()) {
5183     emitAnalysis(
5184         VectorizationReport()
5185         << "store that is conditionally executed prevents vectorization");
5186     DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
5187     return Factor;
5188   }
5189 
5190   // Find the trip count.
5191   unsigned TC = SE->getSmallConstantTripCount(TheLoop);
5192   DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5193 
5194   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5195   unsigned SmallestType, WidestType;
5196   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5197   unsigned WidestRegister = TTI.getRegisterBitWidth(true);
5198   unsigned MaxSafeDepDist = -1U;
5199 
5200   // Get the maximum safe dependence distance in bits computed by LAA. If the
5201   // loop contains any interleaved accesses, we divide the dependence distance
5202   // by the maximum interleave factor of all interleaved groups. Note that
5203   // although the division ensures correctness, this is a fairly conservative
5204   // computation because the maximum distance computed by LAA may not involve
5205   // any of the interleaved accesses.
5206   if (Legal->getMaxSafeDepDistBytes() != -1U)
5207     MaxSafeDepDist =
5208         Legal->getMaxSafeDepDistBytes() * 8 / Legal->getMaxInterleaveFactor();
5209 
5210   WidestRegister =
5211       ((WidestRegister < MaxSafeDepDist) ? WidestRegister : MaxSafeDepDist);
5212   unsigned MaxVectorSize = WidestRegister / WidestType;
5213 
5214   DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
5215                << WidestType << " bits.\n");
5216   DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister
5217                << " bits.\n");
5218 
5219   if (MaxVectorSize == 0) {
5220     DEBUG(dbgs() << "LV: The target has no vector registers.\n");
5221     MaxVectorSize = 1;
5222   }
5223 
5224   assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
5225                                 " into one vector!");
5226 
5227   unsigned VF = MaxVectorSize;
5228   if (MaximizeBandwidth && !OptForSize) {
5229     // Collect all viable vectorization factors.
5230     SmallVector<unsigned, 8> VFs;
5231     unsigned NewMaxVectorSize = WidestRegister / SmallestType;
5232     for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2)
5233       VFs.push_back(VS);
5234 
5235     // For each VF calculate its register usage.
5236     auto RUs = calculateRegisterUsage(VFs);
5237 
5238     // Select the largest VF which doesn't require more registers than existing
5239     // ones.
5240     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
5241     for (int i = RUs.size() - 1; i >= 0; --i) {
5242       if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
5243         VF = VFs[i];
5244         break;
5245       }
5246     }
5247   }
5248 
5249   // If we optimize the program for size, avoid creating the tail loop.
5250   if (OptForSize) {
5251     // If we are unable to calculate the trip count then don't try to vectorize.
5252     if (TC < 2) {
5253       emitAnalysis(
5254           VectorizationReport()
5255           << "unable to calculate the loop count due to complex control flow");
5256       DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
5257       return Factor;
5258     }
5259 
5260     // Find the maximum SIMD width that can fit within the trip count.
5261     VF = TC % MaxVectorSize;
5262 
5263     if (VF == 0)
5264       VF = MaxVectorSize;
5265     else {
5266       // If the trip count that we found modulo the vectorization factor is not
5267       // zero then we require a tail.
5268       emitAnalysis(VectorizationReport()
5269                    << "cannot optimize for size and vectorize at the "
5270                       "same time. Enable vectorization of this loop "
5271                       "with '#pragma clang loop vectorize(enable)' "
5272                       "when compiling with -Os/-Oz");
5273       DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
5274       return Factor;
5275     }
5276   }
5277 
5278   int UserVF = Hints->getWidth();
5279   if (UserVF != 0) {
5280     assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
5281     DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
5282 
5283     Factor.Width = UserVF;
5284     return Factor;
5285   }
5286 
5287   float Cost = expectedCost(1).first;
5288 #ifndef NDEBUG
5289   const float ScalarCost = Cost;
5290 #endif /* NDEBUG */
5291   unsigned Width = 1;
5292   DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
5293 
5294   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5295   // Ignore scalar width, because the user explicitly wants vectorization.
5296   if (ForceVectorization && VF > 1) {
5297     Width = 2;
5298     Cost = expectedCost(Width).first / (float)Width;
5299   }
5300 
5301   for (unsigned i = 2; i <= VF; i *= 2) {
5302     // Notice that the vector loop needs to be executed less times, so
5303     // we need to divide the cost of the vector loops by the width of
5304     // the vector elements.
5305     VectorizationCostTy C = expectedCost(i);
5306     float VectorCost = C.first / (float)i;
5307     DEBUG(dbgs() << "LV: Vector loop of width " << i
5308                  << " costs: " << (int)VectorCost << ".\n");
5309     if (!C.second && !ForceVectorization) {
5310       DEBUG(
5311           dbgs() << "LV: Not considering vector loop of width " << i
5312                  << " because it will not generate any vector instructions.\n");
5313       continue;
5314     }
5315     if (VectorCost < Cost) {
5316       Cost = VectorCost;
5317       Width = i;
5318     }
5319   }
5320 
5321   DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
5322         << "LV: Vectorization seems to be not beneficial, "
5323         << "but was forced by a user.\n");
5324   DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
5325   Factor.Width = Width;
5326   Factor.Cost = Width * Cost;
5327   return Factor;
5328 }
5329 
5330 std::pair<unsigned, unsigned>
5331 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
5332   unsigned MinWidth = -1U;
5333   unsigned MaxWidth = 8;
5334   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5335 
5336   // For each block.
5337   for (Loop::block_iterator bb = TheLoop->block_begin(),
5338                             be = TheLoop->block_end();
5339        bb != be; ++bb) {
5340     BasicBlock *BB = *bb;
5341 
5342     // For each instruction in the loop.
5343     for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
5344       Type *T = it->getType();
5345 
5346       // Skip ignored values.
5347       if (ValuesToIgnore.count(&*it))
5348         continue;
5349 
5350       // Only examine Loads, Stores and PHINodes.
5351       if (!isa<LoadInst>(it) && !isa<StoreInst>(it) && !isa<PHINode>(it))
5352         continue;
5353 
5354       // Examine PHI nodes that are reduction variables. Update the type to
5355       // account for the recurrence type.
5356       if (PHINode *PN = dyn_cast<PHINode>(it)) {
5357         if (!Legal->isReductionVariable(PN))
5358           continue;
5359         RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
5360         T = RdxDesc.getRecurrenceType();
5361       }
5362 
5363       // Examine the stored values.
5364       if (StoreInst *ST = dyn_cast<StoreInst>(it))
5365         T = ST->getValueOperand()->getType();
5366 
5367       // Ignore loaded pointer types and stored pointer types that are not
5368       // consecutive. However, we do want to take consecutive stores/loads of
5369       // pointer vectors into account.
5370       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&*it))
5371         continue;
5372 
5373       MinWidth = std::min(MinWidth,
5374                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5375       MaxWidth = std::max(MaxWidth,
5376                           (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
5377     }
5378   }
5379 
5380   return {MinWidth, MaxWidth};
5381 }
5382 
5383 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
5384                                                            unsigned VF,
5385                                                            unsigned LoopCost) {
5386 
5387   // -- The interleave heuristics --
5388   // We interleave the loop in order to expose ILP and reduce the loop overhead.
5389   // There are many micro-architectural considerations that we can't predict
5390   // at this level. For example, frontend pressure (on decode or fetch) due to
5391   // code size, or the number and capabilities of the execution ports.
5392   //
5393   // We use the following heuristics to select the interleave count:
5394   // 1. If the code has reductions, then we interleave to break the cross
5395   // iteration dependency.
5396   // 2. If the loop is really small, then we interleave to reduce the loop
5397   // overhead.
5398   // 3. We don't interleave if we think that we will spill registers to memory
5399   // due to the increased register pressure.
5400 
5401   // When we optimize for size, we don't interleave.
5402   if (OptForSize)
5403     return 1;
5404 
5405   // We used the distance for the interleave count.
5406   if (Legal->getMaxSafeDepDistBytes() != -1U)
5407     return 1;
5408 
5409   // Do not interleave loops with a relatively small trip count.
5410   unsigned TC = SE->getSmallConstantTripCount(TheLoop);
5411   if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
5412     return 1;
5413 
5414   unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
5415   DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
5416                << " registers\n");
5417 
5418   if (VF == 1) {
5419     if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
5420       TargetNumRegisters = ForceTargetNumScalarRegs;
5421   } else {
5422     if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
5423       TargetNumRegisters = ForceTargetNumVectorRegs;
5424   }
5425 
5426   RegisterUsage R = calculateRegisterUsage({VF})[0];
5427   // We divide by these constants so assume that we have at least one
5428   // instruction that uses at least one register.
5429   R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
5430   R.NumInstructions = std::max(R.NumInstructions, 1U);
5431 
5432   // We calculate the interleave count using the following formula.
5433   // Subtract the number of loop invariants from the number of available
5434   // registers. These registers are used by all of the interleaved instances.
5435   // Next, divide the remaining registers by the number of registers that is
5436   // required by the loop, in order to estimate how many parallel instances
5437   // fit without causing spills. All of this is rounded down if necessary to be
5438   // a power of two. We want power of two interleave count to simplify any
5439   // addressing operations or alignment considerations.
5440   unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
5441                               R.MaxLocalUsers);
5442 
5443   // Don't count the induction variable as interleaved.
5444   if (EnableIndVarRegisterHeur)
5445     IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
5446                        std::max(1U, (R.MaxLocalUsers - 1)));
5447 
5448   // Clamp the interleave ranges to reasonable counts.
5449   unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
5450 
5451   // Check if the user has overridden the max.
5452   if (VF == 1) {
5453     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
5454       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
5455   } else {
5456     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
5457       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
5458   }
5459 
5460   // If we did not calculate the cost for VF (because the user selected the VF)
5461   // then we calculate the cost of VF here.
5462   if (LoopCost == 0)
5463     LoopCost = expectedCost(VF).first;
5464 
5465   // Clamp the calculated IC to be between the 1 and the max interleave count
5466   // that the target allows.
5467   if (IC > MaxInterleaveCount)
5468     IC = MaxInterleaveCount;
5469   else if (IC < 1)
5470     IC = 1;
5471 
5472   // Interleave if we vectorized this loop and there is a reduction that could
5473   // benefit from interleaving.
5474   if (VF > 1 && Legal->getReductionVars()->size()) {
5475     DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
5476     return IC;
5477   }
5478 
5479   // Note that if we've already vectorized the loop we will have done the
5480   // runtime check and so interleaving won't require further checks.
5481   bool InterleavingRequiresRuntimePointerCheck =
5482       (VF == 1 && Legal->getRuntimePointerChecking()->Need);
5483 
5484   // We want to interleave small loops in order to reduce the loop overhead and
5485   // potentially expose ILP opportunities.
5486   DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
5487   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
5488     // We assume that the cost overhead is 1 and we use the cost model
5489     // to estimate the cost of the loop and interleave until the cost of the
5490     // loop overhead is about 5% of the cost of the loop.
5491     unsigned SmallIC =
5492         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
5493 
5494     // Interleave until store/load ports (estimated by max interleave count) are
5495     // saturated.
5496     unsigned NumStores = Legal->getNumStores();
5497     unsigned NumLoads = Legal->getNumLoads();
5498     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5499     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5500 
5501     // If we have a scalar reduction (vector reductions are already dealt with
5502     // by this point), we can increase the critical path length if the loop
5503     // we're interleaving is inside another loop. Limit, by default to 2, so the
5504     // critical path only gets increased by one reduction operation.
5505     if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) {
5506       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5507       SmallIC = std::min(SmallIC, F);
5508       StoresIC = std::min(StoresIC, F);
5509       LoadsIC = std::min(LoadsIC, F);
5510     }
5511 
5512     if (EnableLoadStoreRuntimeInterleave &&
5513         std::max(StoresIC, LoadsIC) > SmallIC) {
5514       DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5515       return std::max(StoresIC, LoadsIC);
5516     }
5517 
5518     DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5519     return SmallIC;
5520   }
5521 
5522   // Interleave if this is a large loop (small loops are already dealt with by
5523   // this point) that could benefit from interleaving.
5524   bool HasReductions = (Legal->getReductionVars()->size() > 0);
5525   if (TTI.enableAggressiveInterleaving(HasReductions)) {
5526     DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5527     return IC;
5528   }
5529 
5530   DEBUG(dbgs() << "LV: Not Interleaving.\n");
5531   return 1;
5532 }
5533 
5534 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5535 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
5536   // This function calculates the register usage by measuring the highest number
5537   // of values that are alive at a single location. Obviously, this is a very
5538   // rough estimation. We scan the loop in a topological order in order and
5539   // assign a number to each instruction. We use RPO to ensure that defs are
5540   // met before their users. We assume that each instruction that has in-loop
5541   // users starts an interval. We record every time that an in-loop value is
5542   // used, so we have a list of the first and last occurrences of each
5543   // instruction. Next, we transpose this data structure into a multi map that
5544   // holds the list of intervals that *end* at a specific location. This multi
5545   // map allows us to perform a linear search. We scan the instructions linearly
5546   // and record each time that a new interval starts, by placing it in a set.
5547   // If we find this value in the multi-map then we remove it from the set.
5548   // The max register usage is the maximum size of the set.
5549   // We also search for instructions that are defined outside the loop, but are
5550   // used inside the loop. We need this number separately from the max-interval
5551   // usage number because when we unroll, loop-invariant values do not take
5552   // more register.
5553   LoopBlocksDFS DFS(TheLoop);
5554   DFS.perform(LI);
5555 
5556   RegisterUsage RU;
5557   RU.NumInstructions = 0;
5558 
5559   // Each 'key' in the map opens a new interval. The values
5560   // of the map are the index of the 'last seen' usage of the
5561   // instruction that is the key.
5562   typedef DenseMap<Instruction *, unsigned> IntervalMap;
5563   // Maps instruction to its index.
5564   DenseMap<unsigned, Instruction *> IdxToInstr;
5565   // Marks the end of each interval.
5566   IntervalMap EndPoint;
5567   // Saves the list of instruction indices that are used in the loop.
5568   SmallSet<Instruction *, 8> Ends;
5569   // Saves the list of values that are used in the loop but are
5570   // defined outside the loop, such as arguments and constants.
5571   SmallPtrSet<Value *, 8> LoopInvariants;
5572 
5573   unsigned Index = 0;
5574   for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(), be = DFS.endRPO();
5575        bb != be; ++bb) {
5576     RU.NumInstructions += (*bb)->size();
5577     for (Instruction &I : **bb) {
5578       IdxToInstr[Index++] = &I;
5579 
5580       // Save the end location of each USE.
5581       for (unsigned i = 0; i < I.getNumOperands(); ++i) {
5582         Value *U = I.getOperand(i);
5583         Instruction *Instr = dyn_cast<Instruction>(U);
5584 
5585         // Ignore non-instruction values such as arguments, constants, etc.
5586         if (!Instr)
5587           continue;
5588 
5589         // If this instruction is outside the loop then record it and continue.
5590         if (!TheLoop->contains(Instr)) {
5591           LoopInvariants.insert(Instr);
5592           continue;
5593         }
5594 
5595         // Overwrite previous end points.
5596         EndPoint[Instr] = Index;
5597         Ends.insert(Instr);
5598       }
5599     }
5600   }
5601 
5602   // Saves the list of intervals that end with the index in 'key'.
5603   typedef SmallVector<Instruction *, 2> InstrList;
5604   DenseMap<unsigned, InstrList> TransposeEnds;
5605 
5606   // Transpose the EndPoints to a list of values that end at each index.
5607   for (IntervalMap::iterator it = EndPoint.begin(), e = EndPoint.end(); it != e;
5608        ++it)
5609     TransposeEnds[it->second].push_back(it->first);
5610 
5611   SmallSet<Instruction *, 8> OpenIntervals;
5612 
5613   // Get the size of the widest register.
5614   unsigned MaxSafeDepDist = -1U;
5615   if (Legal->getMaxSafeDepDistBytes() != -1U)
5616     MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
5617   unsigned WidestRegister =
5618       std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
5619   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
5620 
5621   SmallVector<RegisterUsage, 8> RUs(VFs.size());
5622   SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);
5623 
5624   DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
5625 
5626   // A lambda that gets the register usage for the given type and VF.
5627   auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
5628     if (Ty->isTokenTy())
5629       return 0U;
5630     unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
5631     return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
5632   };
5633 
5634   for (unsigned int i = 0; i < Index; ++i) {
5635     Instruction *I = IdxToInstr[i];
5636     // Ignore instructions that are never used within the loop.
5637     if (!Ends.count(I))
5638       continue;
5639 
5640     // Skip ignored values.
5641     if (ValuesToIgnore.count(I))
5642       continue;
5643 
5644     // Remove all of the instructions that end at this location.
5645     InstrList &List = TransposeEnds[i];
5646     for (unsigned int j = 0, e = List.size(); j < e; ++j)
5647       OpenIntervals.erase(List[j]);
5648 
5649     // For each VF find the maximum usage of registers.
5650     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
5651       if (VFs[j] == 1) {
5652         MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size());
5653         continue;
5654       }
5655 
5656       // Count the number of live intervals.
5657       unsigned RegUsage = 0;
5658       for (auto Inst : OpenIntervals)
5659         RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
5660       MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
5661     }
5662 
5663     DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
5664                  << OpenIntervals.size() << '\n');
5665 
5666     // Add the current instruction to the list of open intervals.
5667     OpenIntervals.insert(I);
5668   }
5669 
5670   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
5671     unsigned Invariant = 0;
5672     if (VFs[i] == 1)
5673       Invariant = LoopInvariants.size();
5674     else {
5675       for (auto Inst : LoopInvariants)
5676         Invariant += GetRegUsage(Inst->getType(), VFs[i]);
5677     }
5678 
5679     DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
5680     DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
5681     DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');
5682     DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n');
5683 
5684     RU.LoopInvariantRegs = Invariant;
5685     RU.MaxLocalUsers = MaxUsages[i];
5686     RUs[i] = RU;
5687   }
5688 
5689   return RUs;
5690 }
5691 
5692 LoopVectorizationCostModel::VectorizationCostTy
5693 LoopVectorizationCostModel::expectedCost(unsigned VF) {
5694   VectorizationCostTy Cost;
5695 
5696   // For each block.
5697   for (Loop::block_iterator bb = TheLoop->block_begin(),
5698                             be = TheLoop->block_end();
5699        bb != be; ++bb) {
5700     VectorizationCostTy BlockCost;
5701     BasicBlock *BB = *bb;
5702 
5703     // For each instruction in the old loop.
5704     for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
5705       // Skip dbg intrinsics.
5706       if (isa<DbgInfoIntrinsic>(it))
5707         continue;
5708 
5709       // Skip ignored values.
5710       if (ValuesToIgnore.count(&*it))
5711         continue;
5712 
5713       VectorizationCostTy C = getInstructionCost(&*it, VF);
5714 
5715       // Check if we should override the cost.
5716       if (ForceTargetInstructionCost.getNumOccurrences() > 0)
5717         C.first = ForceTargetInstructionCost;
5718 
5719       BlockCost.first += C.first;
5720       BlockCost.second |= C.second;
5721       DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF "
5722                    << VF << " For instruction: " << *it << '\n');
5723     }
5724 
5725     // We assume that if-converted blocks have a 50% chance of being executed.
5726     // When the code is scalar then some of the blocks are avoided due to CF.
5727     // When the code is vectorized we execute all code paths.
5728     if (VF == 1 && Legal->blockNeedsPredication(*bb))
5729       BlockCost.first /= 2;
5730 
5731     Cost.first += BlockCost.first;
5732     Cost.second |= BlockCost.second;
5733   }
5734 
5735   return Cost;
5736 }
5737 
5738 /// \brief Check if the load/store instruction \p I may be translated into
5739 /// gather/scatter during vectorization.
5740 ///
5741 /// Pointer \p Ptr specifies address in memory for the given scalar memory
5742 /// instruction. We need it to retrieve data type.
5743 /// Using gather/scatter is possible when it is supported by target.
5744 static bool isGatherOrScatterLegal(Instruction *I, Value *Ptr,
5745                                    LoopVectorizationLegality *Legal) {
5746   Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
5747   return (isa<LoadInst>(I) && Legal->isLegalMaskedGather(DataTy)) ||
5748          (isa<StoreInst>(I) && Legal->isLegalMaskedScatter(DataTy));
5749 }
5750 
5751 /// \brief Check whether the address computation for a non-consecutive memory
5752 /// access looks like an unlikely candidate for being merged into the indexing
5753 /// mode.
5754 ///
5755 /// We look for a GEP which has one index that is an induction variable and all
5756 /// other indices are loop invariant. If the stride of this access is also
5757 /// within a small bound we decide that this address computation can likely be
5758 /// merged into the addressing mode.
5759 /// In all other cases, we identify the address computation as complex.
5760 static bool isLikelyComplexAddressComputation(Value *Ptr,
5761                                               LoopVectorizationLegality *Legal,
5762                                               ScalarEvolution *SE,
5763                                               const Loop *TheLoop) {
5764   GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr);
5765   if (!Gep)
5766     return true;
5767 
5768   // We are looking for a gep with all loop invariant indices except for one
5769   // which should be an induction variable.
5770   unsigned NumOperands = Gep->getNumOperands();
5771   for (unsigned i = 1; i < NumOperands; ++i) {
5772     Value *Opd = Gep->getOperand(i);
5773     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
5774         !Legal->isInductionVariable(Opd))
5775       return true;
5776   }
5777 
5778   // Now we know we have a GEP ptr, %inv, %ind, %inv. Make sure that the step
5779   // can likely be merged into the address computation.
5780   unsigned MaxMergeDistance = 64;
5781 
5782   const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Ptr));
5783   if (!AddRec)
5784     return true;
5785 
5786   // Check the step is constant.
5787   const SCEV *Step = AddRec->getStepRecurrence(*SE);
5788   // Calculate the pointer stride and check if it is consecutive.
5789   const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
5790   if (!C)
5791     return true;
5792 
5793   const APInt &APStepVal = C->getAPInt();
5794 
5795   // Huge step value - give up.
5796   if (APStepVal.getBitWidth() > 64)
5797     return true;
5798 
5799   int64_t StepVal = APStepVal.getSExtValue();
5800 
5801   return StepVal > MaxMergeDistance;
5802 }
5803 
5804 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
5805   return Legal->hasStride(I->getOperand(0)) ||
5806          Legal->hasStride(I->getOperand(1));
5807 }
5808 
5809 LoopVectorizationCostModel::VectorizationCostTy
5810 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
5811   // If we know that this instruction will remain uniform, check the cost of
5812   // the scalar version.
5813   if (Legal->isUniformAfterVectorization(I))
5814     VF = 1;
5815 
5816   Type *VectorTy;
5817   unsigned C = getInstructionCost(I, VF, VectorTy);
5818 
5819   bool TypeNotScalarized =
5820       VF > 1 && !VectorTy->isVoidTy() && TTI.getNumberOfParts(VectorTy) < VF;
5821   return VectorizationCostTy(C, TypeNotScalarized);
5822 }
5823 
5824 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
5825                                                         unsigned VF,
5826                                                         Type *&VectorTy) {
5827   Type *RetTy = I->getType();
5828   if (VF > 1 && MinBWs.count(I))
5829     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
5830   VectorTy = ToVectorTy(RetTy, VF);
5831 
5832   // TODO: We need to estimate the cost of intrinsic calls.
5833   switch (I->getOpcode()) {
5834   case Instruction::GetElementPtr:
5835     // We mark this instruction as zero-cost because the cost of GEPs in
5836     // vectorized code depends on whether the corresponding memory instruction
5837     // is scalarized or not. Therefore, we handle GEPs with the memory
5838     // instruction cost.
5839     return 0;
5840   case Instruction::Br: {
5841     return TTI.getCFInstrCost(I->getOpcode());
5842   }
5843   case Instruction::PHI: {
5844     auto *Phi = cast<PHINode>(I);
5845 
5846     // First-order recurrences are replaced by vector shuffles inside the loop.
5847     if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
5848       return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
5849                                 VectorTy, VF - 1, VectorTy);
5850 
5851     // TODO: IF-converted IFs become selects.
5852     return 0;
5853   }
5854   case Instruction::Add:
5855   case Instruction::FAdd:
5856   case Instruction::Sub:
5857   case Instruction::FSub:
5858   case Instruction::Mul:
5859   case Instruction::FMul:
5860   case Instruction::UDiv:
5861   case Instruction::SDiv:
5862   case Instruction::FDiv:
5863   case Instruction::URem:
5864   case Instruction::SRem:
5865   case Instruction::FRem:
5866   case Instruction::Shl:
5867   case Instruction::LShr:
5868   case Instruction::AShr:
5869   case Instruction::And:
5870   case Instruction::Or:
5871   case Instruction::Xor: {
5872     // Since we will replace the stride by 1 the multiplication should go away.
5873     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
5874       return 0;
5875     // Certain instructions can be cheaper to vectorize if they have a constant
5876     // second vector operand. One example of this are shifts on x86.
5877     TargetTransformInfo::OperandValueKind Op1VK =
5878         TargetTransformInfo::OK_AnyValue;
5879     TargetTransformInfo::OperandValueKind Op2VK =
5880         TargetTransformInfo::OK_AnyValue;
5881     TargetTransformInfo::OperandValueProperties Op1VP =
5882         TargetTransformInfo::OP_None;
5883     TargetTransformInfo::OperandValueProperties Op2VP =
5884         TargetTransformInfo::OP_None;
5885     Value *Op2 = I->getOperand(1);
5886 
5887     // Check for a splat of a constant or for a non uniform vector of constants.
5888     if (isa<ConstantInt>(Op2)) {
5889       ConstantInt *CInt = cast<ConstantInt>(Op2);
5890       if (CInt && CInt->getValue().isPowerOf2())
5891         Op2VP = TargetTransformInfo::OP_PowerOf2;
5892       Op2VK = TargetTransformInfo::OK_UniformConstantValue;
5893     } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
5894       Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
5895       Constant *SplatValue = cast<Constant>(Op2)->getSplatValue();
5896       if (SplatValue) {
5897         ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue);
5898         if (CInt && CInt->getValue().isPowerOf2())
5899           Op2VP = TargetTransformInfo::OP_PowerOf2;
5900         Op2VK = TargetTransformInfo::OK_UniformConstantValue;
5901       }
5902     }
5903 
5904     return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, Op2VK,
5905                                       Op1VP, Op2VP);
5906   }
5907   case Instruction::Select: {
5908     SelectInst *SI = cast<SelectInst>(I);
5909     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
5910     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
5911     Type *CondTy = SI->getCondition()->getType();
5912     if (!ScalarCond)
5913       CondTy = VectorType::get(CondTy, VF);
5914 
5915     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy);
5916   }
5917   case Instruction::ICmp:
5918   case Instruction::FCmp: {
5919     Type *ValTy = I->getOperand(0)->getType();
5920     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
5921     auto It = MinBWs.find(Op0AsInstruction);
5922     if (VF > 1 && It != MinBWs.end())
5923       ValTy = IntegerType::get(ValTy->getContext(), It->second);
5924     VectorTy = ToVectorTy(ValTy, VF);
5925     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy);
5926   }
5927   case Instruction::Store:
5928   case Instruction::Load: {
5929     StoreInst *SI = dyn_cast<StoreInst>(I);
5930     LoadInst *LI = dyn_cast<LoadInst>(I);
5931     Type *ValTy = (SI ? SI->getValueOperand()->getType() : LI->getType());
5932     VectorTy = ToVectorTy(ValTy, VF);
5933 
5934     unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment();
5935     unsigned AS =
5936         SI ? SI->getPointerAddressSpace() : LI->getPointerAddressSpace();
5937     Value *Ptr = SI ? SI->getPointerOperand() : LI->getPointerOperand();
5938     // We add the cost of address computation here instead of with the gep
5939     // instruction because only here we know whether the operation is
5940     // scalarized.
5941     if (VF == 1)
5942       return TTI.getAddressComputationCost(VectorTy) +
5943              TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
5944 
5945     if (LI && Legal->isUniform(Ptr)) {
5946       // Scalar load + broadcast
5947       unsigned Cost = TTI.getAddressComputationCost(ValTy->getScalarType());
5948       Cost += TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
5949                                   Alignment, AS);
5950       return Cost +
5951              TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, ValTy);
5952     }
5953 
5954     // For an interleaved access, calculate the total cost of the whole
5955     // interleave group.
5956     if (Legal->isAccessInterleaved(I)) {
5957       auto Group = Legal->getInterleavedAccessGroup(I);
5958       assert(Group && "Fail to get an interleaved access group.");
5959 
5960       // Only calculate the cost once at the insert position.
5961       if (Group->getInsertPos() != I)
5962         return 0;
5963 
5964       unsigned InterleaveFactor = Group->getFactor();
5965       Type *WideVecTy =
5966           VectorType::get(VectorTy->getVectorElementType(),
5967                           VectorTy->getVectorNumElements() * InterleaveFactor);
5968 
5969       // Holds the indices of existing members in an interleaved load group.
5970       // An interleaved store group doesn't need this as it dones't allow gaps.
5971       SmallVector<unsigned, 4> Indices;
5972       if (LI) {
5973         for (unsigned i = 0; i < InterleaveFactor; i++)
5974           if (Group->getMember(i))
5975             Indices.push_back(i);
5976       }
5977 
5978       // Calculate the cost of the whole interleaved group.
5979       unsigned Cost = TTI.getInterleavedMemoryOpCost(
5980           I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
5981           Group->getAlignment(), AS);
5982 
5983       if (Group->isReverse())
5984         Cost +=
5985             Group->getNumMembers() *
5986             TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
5987 
5988       // FIXME: The interleaved load group with a huge gap could be even more
5989       // expensive than scalar operations. Then we could ignore such group and
5990       // use scalar operations instead.
5991       return Cost;
5992     }
5993 
5994     // Scalarized loads/stores.
5995     int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
5996     bool UseGatherOrScatter =
5997         (ConsecutiveStride == 0) && isGatherOrScatterLegal(I, Ptr, Legal);
5998 
5999     bool Reverse = ConsecutiveStride < 0;
6000     const DataLayout &DL = I->getModule()->getDataLayout();
6001     unsigned ScalarAllocatedSize = DL.getTypeAllocSize(ValTy);
6002     unsigned VectorElementSize = DL.getTypeStoreSize(VectorTy) / VF;
6003     if ((!ConsecutiveStride && !UseGatherOrScatter) ||
6004         ScalarAllocatedSize != VectorElementSize) {
6005       bool IsComplexComputation =
6006           isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop);
6007       unsigned Cost = 0;
6008       // The cost of extracting from the value vector and pointer vector.
6009       Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6010       for (unsigned i = 0; i < VF; ++i) {
6011         //  The cost of extracting the pointer operand.
6012         Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, PtrTy, i);
6013         // In case of STORE, the cost of ExtractElement from the vector.
6014         // In case of LOAD, the cost of InsertElement into the returned
6015         // vector.
6016         Cost += TTI.getVectorInstrCost(SI ? Instruction::ExtractElement
6017                                           : Instruction::InsertElement,
6018                                        VectorTy, i);
6019       }
6020 
6021       // The cost of the scalar loads/stores.
6022       Cost += VF * TTI.getAddressComputationCost(PtrTy, IsComplexComputation);
6023       Cost += VF *
6024               TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
6025                                   Alignment, AS);
6026       return Cost;
6027     }
6028 
6029     unsigned Cost = TTI.getAddressComputationCost(VectorTy);
6030     if (UseGatherOrScatter) {
6031       assert(ConsecutiveStride == 0 &&
6032              "Gather/Scatter are not used for consecutive stride");
6033       return Cost +
6034              TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
6035                                         Legal->isMaskRequired(I), Alignment);
6036     }
6037     // Wide load/stores.
6038     if (Legal->isMaskRequired(I))
6039       Cost +=
6040           TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
6041     else
6042       Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
6043 
6044     if (Reverse)
6045       Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
6046     return Cost;
6047   }
6048   case Instruction::ZExt:
6049   case Instruction::SExt:
6050   case Instruction::FPToUI:
6051   case Instruction::FPToSI:
6052   case Instruction::FPExt:
6053   case Instruction::PtrToInt:
6054   case Instruction::IntToPtr:
6055   case Instruction::SIToFP:
6056   case Instruction::UIToFP:
6057   case Instruction::Trunc:
6058   case Instruction::FPTrunc:
6059   case Instruction::BitCast: {
6060     // We optimize the truncation of induction variable.
6061     // The cost of these is the same as the scalar operation.
6062     if (I->getOpcode() == Instruction::Trunc &&
6063         Legal->isInductionVariable(I->getOperand(0)))
6064       return TTI.getCastInstrCost(I->getOpcode(), I->getType(),
6065                                   I->getOperand(0)->getType());
6066 
6067     Type *SrcScalarTy = I->getOperand(0)->getType();
6068     Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF);
6069     if (VF > 1 && MinBWs.count(I)) {
6070       // This cast is going to be shrunk. This may remove the cast or it might
6071       // turn it into slightly different cast. For example, if MinBW == 16,
6072       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
6073       //
6074       // Calculate the modified src and dest types.
6075       Type *MinVecTy = VectorTy;
6076       if (I->getOpcode() == Instruction::Trunc) {
6077         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
6078         VectorTy =
6079             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6080       } else if (I->getOpcode() == Instruction::ZExt ||
6081                  I->getOpcode() == Instruction::SExt) {
6082         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
6083         VectorTy =
6084             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
6085       }
6086     }
6087 
6088     return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy);
6089   }
6090   case Instruction::Call: {
6091     bool NeedToScalarize;
6092     CallInst *CI = cast<CallInst>(I);
6093     unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
6094     if (getVectorIntrinsicIDForCall(CI, TLI))
6095       return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
6096     return CallCost;
6097   }
6098   default: {
6099     // We are scalarizing the instruction. Return the cost of the scalar
6100     // instruction, plus the cost of insert and extract into vector
6101     // elements, times the vector width.
6102     unsigned Cost = 0;
6103 
6104     if (!RetTy->isVoidTy() && VF != 1) {
6105       unsigned InsCost =
6106           TTI.getVectorInstrCost(Instruction::InsertElement, VectorTy);
6107       unsigned ExtCost =
6108           TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy);
6109 
6110       // The cost of inserting the results plus extracting each one of the
6111       // operands.
6112       Cost += VF * (InsCost + ExtCost * I->getNumOperands());
6113     }
6114 
6115     // The cost of executing VF copies of the scalar instruction. This opcode
6116     // is unknown. Assume that it is the same as 'mul'.
6117     Cost += VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy);
6118     return Cost;
6119   }
6120   } // end of switch.
6121 }
6122 
6123 char LoopVectorize::ID = 0;
6124 static const char lv_name[] = "Loop Vectorization";
6125 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
6126 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
6127 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
6128 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
6129 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
6130 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
6131 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
6132 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
6133 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
6134 INITIALIZE_PASS_DEPENDENCY(LCSSA)
6135 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
6136 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
6137 INITIALIZE_PASS_DEPENDENCY(LoopAccessAnalysis)
6138 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
6139 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
6140 
6141 namespace llvm {
6142 Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
6143   return new LoopVectorize(NoUnrolling, AlwaysVectorize);
6144 }
6145 }
6146 
6147 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
6148   // Check for a store.
6149   if (StoreInst *ST = dyn_cast<StoreInst>(Inst))
6150     return Legal->isConsecutivePtr(ST->getPointerOperand()) != 0;
6151 
6152   // Check for a load.
6153   if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
6154     return Legal->isConsecutivePtr(LI->getPointerOperand()) != 0;
6155 
6156   return false;
6157 }
6158 
6159 void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
6160                                              bool IfPredicateStore) {
6161   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
6162   // Holds vector parameters or scalars, in case of uniform vals.
6163   SmallVector<VectorParts, 4> Params;
6164 
6165   setDebugLocFromInst(Builder, Instr);
6166 
6167   // Find all of the vectorized parameters.
6168   for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
6169     Value *SrcOp = Instr->getOperand(op);
6170 
6171     // If we are accessing the old induction variable, use the new one.
6172     if (SrcOp == OldInduction) {
6173       Params.push_back(getVectorValue(SrcOp));
6174       continue;
6175     }
6176 
6177     // Try using previously calculated values.
6178     Instruction *SrcInst = dyn_cast<Instruction>(SrcOp);
6179 
6180     // If the src is an instruction that appeared earlier in the basic block
6181     // then it should already be vectorized.
6182     if (SrcInst && OrigLoop->contains(SrcInst)) {
6183       assert(WidenMap.has(SrcInst) && "Source operand is unavailable");
6184       // The parameter is a vector value from earlier.
6185       Params.push_back(WidenMap.get(SrcInst));
6186     } else {
6187       // The parameter is a scalar from outside the loop. Maybe even a constant.
6188       VectorParts Scalars;
6189       Scalars.append(UF, SrcOp);
6190       Params.push_back(Scalars);
6191     }
6192   }
6193 
6194   assert(Params.size() == Instr->getNumOperands() &&
6195          "Invalid number of operands");
6196 
6197   // Does this instruction return a value ?
6198   bool IsVoidRetTy = Instr->getType()->isVoidTy();
6199 
6200   Value *UndefVec = IsVoidRetTy ? nullptr : UndefValue::get(Instr->getType());
6201   // Create a new entry in the WidenMap and initialize it to Undef or Null.
6202   VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);
6203 
6204   VectorParts Cond;
6205   if (IfPredicateStore) {
6206     assert(Instr->getParent()->getSinglePredecessor() &&
6207            "Only support single predecessor blocks");
6208     Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(),
6209                           Instr->getParent());
6210   }
6211 
6212   // For each vector unroll 'part':
6213   for (unsigned Part = 0; Part < UF; ++Part) {
6214     // For each scalar that we create:
6215 
6216     // Start an "if (pred) a[i] = ..." block.
6217     Value *Cmp = nullptr;
6218     if (IfPredicateStore) {
6219       if (Cond[Part]->getType()->isVectorTy())
6220         Cond[Part] =
6221             Builder.CreateExtractElement(Cond[Part], Builder.getInt32(0));
6222       Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cond[Part],
6223                                ConstantInt::get(Cond[Part]->getType(), 1));
6224     }
6225 
6226     Instruction *Cloned = Instr->clone();
6227     if (!IsVoidRetTy)
6228       Cloned->setName(Instr->getName() + ".cloned");
6229     // Replace the operands of the cloned instructions with extracted scalars.
6230     for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
6231       Value *Op = Params[op][Part];
6232       Cloned->setOperand(op, Op);
6233     }
6234 
6235     // Place the cloned scalar in the new loop.
6236     Builder.Insert(Cloned);
6237 
6238     // If we just cloned a new assumption, add it the assumption cache.
6239     if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
6240       if (II->getIntrinsicID() == Intrinsic::assume)
6241         AC->registerAssumption(II);
6242 
6243     // If the original scalar returns a value we need to place it in a vector
6244     // so that future users will be able to use it.
6245     if (!IsVoidRetTy)
6246       VecResults[Part] = Cloned;
6247 
6248     // End if-block.
6249     if (IfPredicateStore)
6250       PredicatedStores.push_back(std::make_pair(cast<StoreInst>(Cloned), Cmp));
6251   }
6252 }
6253 
6254 void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) {
6255   StoreInst *SI = dyn_cast<StoreInst>(Instr);
6256   bool IfPredicateStore = (SI && Legal->blockNeedsPredication(SI->getParent()));
6257 
6258   return scalarizeInstruction(Instr, IfPredicateStore);
6259 }
6260 
6261 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
6262 
6263 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
6264 
6265 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx,
6266                                         const SCEV *StepSCEV) {
6267   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
6268   SCEVExpander Exp(*PSE.getSE(), DL, "induction");
6269   Value *StepValue = Exp.expandCodeFor(StepSCEV, StepSCEV->getType(),
6270                                        &*Builder.GetInsertPoint());
6271   return getStepVector(Val, StartIdx, StepValue);
6272 }
6273 
6274 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step) {
6275   // When unrolling and the VF is 1, we only need to add a simple scalar.
6276   Type *ITy = Val->getType();
6277   assert(!ITy->isVectorTy() && "Val must be a scalar");
6278   Constant *C = ConstantInt::get(ITy, StartIdx);
6279   return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
6280 }
6281