//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
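// For example (an illustrative sketch, not from the sources): four
// consecutive scalar stores
//
//   store float %a0, float* %p
//   store float %a1, float* %p1    ; %p1 = %p + 1
//   store float %a2, float* %p2    ; %p2 = %p + 2
//   store float %a3, float* %p3    ; %p3 = %p + 3
//
// can be combined into a single store of a <4 x float> built from %a0..%a3,
// provided the cost model considers it profitable.
//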
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <memory>

using namespace llvm;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number"));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
    cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// \brief Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_fp128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return false;

    if (BB != I->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (Value *i : VL)
    if (!isa<Constant>(i))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

///\returns Opcode that can be clubbed with \p Op to create an alternate
/// sequence which can later be merged as a ShuffleVector instruction.
static unsigned getAltOpcode(unsigned Op) {
  switch (Op) {
  case Instruction::FAdd:
    return Instruction::FSub;
  case Instruction::FSub:
    return Instruction::FAdd;
  case Instruction::Add:
    return Instruction::Sub;
  case Instruction::Sub:
    return Instruction::Add;
  default:
    return 0;
  }
}

///\returns bool representing if Opcode \p Op can be part
/// of an alternate sequence which can later be merged as
/// a ShuffleVector instruction.
static bool canCombineAsAltInst(unsigned Op) {
  return Op == Instruction::FAdd || Op == Instruction::FSub ||
         Op == Instruction::Sub || Op == Instruction::Add;
}

/// \returns ShuffleVector instruction if instructions in \p VL have
///  alternate fadd,fsub / fsub,fadd/add,sub/sub,add sequence.
/// (e.g. opcodes of fadd,fsub,fadd,fsub...)
static unsigned isAltInst(ArrayRef<Value *> VL) {
  // The caller ensures VL[0] is an instruction with a combinable opcode.
  Instruction *I0 = cast<Instruction>(VL[0]);
  unsigned Opcode = I0->getOpcode();
  unsigned AltOpcode = getAltOpcode(Opcode);
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || I->getOpcode() != ((i & 1) ? AltOpcode : Opcode))
      return 0;
  }
  return Instruction::ShuffleVector;
}
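
// Illustrative example (not from the source): for VL = {fadd, fsub, fadd,
// fsub}, even lanes match Opcode (fadd) and odd lanes match the alternate
// opcode (fsub), so isAltInst returns Instruction::ShuffleVector; codegen can
// then emit both vector operations and blend the lanes with a shufflevector.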

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode()) {
      if (canCombineAsAltInst(Opcode) && i == 1)
        return isAltInst(VL);
      return 0;
    }
  }
  return Opcode;
}

/// Get the intersection (logical and) of all of the potential IR flags
/// of each scalar operation (VL) that will be converted into a vector (I).
/// Flag set: NSW, NUW, exact, and all of fast-math.
static void propagateIRFlags(Value *I, ArrayRef<Value *> VL) {
  if (auto *VecOp = dyn_cast<Instruction>(I)) {
    if (auto *I0 = dyn_cast<Instruction>(VL[0])) {
      // VecOp is initialized to the 0th scalar, so start counting from index
      // '1'.
      VecOp->copyIRFlags(I0);
      for (int i = 1, e = VL.size(); i < e; ++i) {
        if (auto *Scalar = dyn_cast<Instruction>(VL[i]))
          VecOp->andIRFlags(Scalar);
      }
    }
  }
}
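
// Illustrative example (not from the source): if VL = {add nsw, add nsw,
// add, add nsw}, the vector add built for VL must drop the nsw flag, since
// the third scalar does not guarantee the absence of signed wrap.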

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static bool matchExtractIndex(Instruction *E, unsigned Idx, unsigned Opcode) {
  assert(Opcode == Instruction::ExtractElement ||
         Opcode == Instruction::ExtractValue);
  if (Opcode == Instruction::ExtractElement) {
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    return CI && CI->getZExtValue() == Idx;
  } else {
    ExtractValueInst *EI = cast<ExtractValueInst>(E);
    return EI->getNumIndices() == 1 && *EI->idx_begin() == Idx;
  }
}
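
// Illustrative example (not from the source): for
//   %e = extractelement <4 x float> %v, i32 2
// matchExtractIndex(%e, 2, Instruction::ExtractElement) returns true, and
// false for any other Idx.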

/// \returns True if an in-tree use also needs to be extracted. This refers to
/// a possible scalar operand in a vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    if (hasVectorInstrinsicScalarOpd(ID, 1))
      return (CI->getArgOperand(1) == Scalar);
    return false;
  }
  default:
    return false;
  }
}
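
// Illustrative example (not from the source): if a GEP is vectorized but is
// also used as the pointer operand of a scalar store, the store still needs
// the scalar GEP, so an extract must be generated for this in-tree user.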

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

namespace llvm {
namespace slpvectorizer {
/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;
  typedef MapVector<Value *, SmallVector<Instruction *, 2>>
      ExtraValueToDebugLocsMap;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL)
      : NumLoadsWantToKeepOrder(0), NumLoadsWantToChangeOrder(0), F(Func),
        SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC), DB(DB),
        DL(DL), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    //       data type rather than just register size. For example, x86 AVX has
    //       256-bit registers, but it does not support integer operations
    //       at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize = TTI->getRegisterBitWidth(true);

    MinVecRegSize = MinVectorRegSizeOption;
  }

  /// \brief Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();
  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractelement instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  int getSpillCost();

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);
  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst taking
  /// into account (and updating it, if required) the list of externally used
  /// values stored in \p ExternallyUsedValues.
  void buildTree(ArrayRef<Value *> Roots,
                 ExtraValueToDebugLocsMap &ExternallyUsedValues,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumLoadsWantToKeepOrder = 0;
    NumLoadsWantToChangeOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
  }

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// \returns true if it is beneficial to reverse the vector order.
  bool shouldReorder() const {
    return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
  }

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden
  // by cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  /// \brief Check if ArrayType or StructType is isomorphic to some VectorType.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable();

private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);

  /// \returns True if the ExtractElement/ExtractValue instructions in VL can
  /// be vectorized to use the original vector (or aggregate "bitcast" to a
  /// vector).
  bool canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const;

  /// Vectorize a single entry in the tree. VL contains all isomorphic scalars
  /// in order of their usage in the user program, for example ADD1, ADD2, and
  /// so on, or LOAD1, LOAD2, etc.
  Value *vectorizeTree(ArrayRef<Value *> VL, TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This may happen in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL) const;

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even if the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  /// Reorder commutative operands in alt shuffle if they result in
  /// vectorized code.
  void reorderAltShuffleOperands(ArrayRef<Value *> VL,
                                 SmallVectorImpl<Value *> &Left,
                                 SmallVectorImpl<Value *> &Right);
  /// Reorder commutative operands to get a better probability of
  /// generating vectorized code.
  void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                      SmallVectorImpl<Value *> &Left,
                                      SmallVectorImpl<Value *> &Right);
  struct TreeEntry {
    TreeEntry() : Scalars(), VectorizedValue(nullptr),
    NeedToGather(0), NeedToShuffle(0) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// \returns true if the scalars in VL are found in this tree entry.
    bool isFoundJumbled(ArrayRef<Value *> VL, const DataLayout &DL,
                        ScalarEvolution &SE) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      SmallVector<Value *, 8> List;
      if (!sortMemAccesses(VL, DL, SE, List))
        return false;

      return std::equal(List.begin(), List.end(), Scalars.begin());
    }
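
    // Illustrative example (not from the source): if Scalars holds loads of
    // a[0], a[1], a[2], a[3] in address order and VL holds the same loads as
    // {a[2], a[0], a[3], a[1]}, sortMemAccesses restores the address order,
    // so isFoundJumbled returns true and the jumbled bundle can reuse this
    // entry with a shuffle.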

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// Do we need to gather this sequence ?
    bool NeedToGather;

    /// Do we need to shuffle the load ?
    bool NeedToShuffle;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized,
                          bool NeedToShuffle) {
    VectorizableTree.emplace_back();
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    Last->NeedToShuffle = NeedToShuffle;
    if (Vectorized) {
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      MustGather.insert(VL.begin(), VL.end());
    }
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value*, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L)
        : Scalar(S), User(U), Lane(L) {}
    // Which scalar in our function.
    Value *Scalar;
    // Which user that uses the scalar.
    llvm::User *User;
    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// Checks if two instructions may access the same memory.
  ///
  /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
  /// is invariant in the calling loop.
  bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
                 Instruction *Inst2) {
    // First check if the result is already in the cache.
    AliasCacheKey key = std::make_pair(Inst1, Inst2);
    Optional<bool> &result = AliasCache[key];
    if (result.hasValue()) {
      return result.getValue();
    }
    MemoryLocation Loc2 = getLocation(Inst2, AA);
    bool aliased = true;
    if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
      // Do the alias check.
      aliased = AA->alias(Loc1, Loc2);
    }
    // Store the result in the cache.
    result = aliased;
    return aliased;
  }

  typedef std::pair<Instruction *, Instruction *> AliasCacheKey;

  /// Cache for alias results.
  /// TODO: consider moving this to the AliasAnalysis itself.
  DenseMap<AliasCacheKey, Optional<bool>> AliasCache;

  /// Removes an instruction from its block and eventually deletes it.
  /// It's like Instruction::eraseFromParent() except that the actual deletion
  /// is delayed until BoUpSLP is destructed.
  /// This is required to ensure that there are no incorrect collisions in the
  /// AliasCache, which can happen if a new instruction is allocated at the
  /// same address as a previously deleted instruction.
  void eraseInstruction(Instruction *I) {
    I->removeFromParent();
    I->dropAllReferences();
    DeletedInstructions.push_back(std::unique_ptr<Instruction>(I));
  }

  /// Temporary store for deleted instructions. Instructions will be deleted
  /// eventually when the BoUpSLP is destructed.
  SmallVector<std::unique_ptr<Instruction>, 8> DeletedInstructions;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User). External User
  /// can be nullptr, which means that this Internal Scalar will be used later,
  /// after vectorization.
  UserList ExternalUses;

  /// Values used only by @llvm.assume calls.
  SmallPtrSet<const Value *, 32> EphValues;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;
  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;

  /// Contains all scheduling relevant data for an instruction.
  /// A ScheduleData either represents a single instruction or a member of an
  /// instruction bundle (= a group of instructions which is combined into a
  /// vector instruction).
  struct ScheduleData {

    // The initial value for the dependency counters. It means that the
    // dependencies are not calculated yet.
    enum { InvalidDeps = -1 };

    ScheduleData()
        : Inst(nullptr), FirstInBundle(nullptr), NextInBundle(nullptr),
          NextLoadStore(nullptr), SchedulingRegionID(0), SchedulingPriority(0),
          Dependencies(InvalidDeps), UnscheduledDeps(InvalidDeps),
          UnscheduledDepsInBundle(InvalidDeps), IsScheduled(false) {}

    void init(int BlockSchedulingRegionID) {
      FirstInBundle = this;
      NextInBundle = nullptr;
      NextLoadStore = nullptr;
      IsScheduled = false;
      SchedulingRegionID = BlockSchedulingRegionID;
      UnscheduledDepsInBundle = UnscheduledDeps;
      clearDependencies();
    }

    /// Returns true if the dependency information has been calculated.
    bool hasValidDependencies() const { return Dependencies != InvalidDeps; }

    /// Returns true for single instructions and for bundle representatives
    /// (= the head of a bundle).
    bool isSchedulingEntity() const { return FirstInBundle == this; }

    /// Returns true if it represents an instruction bundle and not only a
    /// single instruction.
    bool isPartOfBundle() const {
      return NextInBundle != nullptr || FirstInBundle != this;
    }

    /// Returns true if it is ready for scheduling, i.e. it has no more
    /// unscheduled depending instructions/bundles.
    bool isReady() const {
      assert(isSchedulingEntity() &&
             "can't consider non-scheduling entity for ready list");
      return UnscheduledDepsInBundle == 0 && !IsScheduled;
    }

    /// Modifies the number of unscheduled dependencies, also updating it for
    /// the whole bundle.
    int incrementUnscheduledDeps(int Incr) {
      UnscheduledDeps += Incr;
      return FirstInBundle->UnscheduledDepsInBundle += Incr;
    }

    /// Sets the number of unscheduled dependencies to the number of
    /// dependencies.
    void resetUnscheduledDeps() {
      incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
    }

    /// Clears all dependency information.
    void clearDependencies() {
      Dependencies = InvalidDeps;
      resetUnscheduledDeps();
      MemoryDependencies.clear();
    }

    void dump(raw_ostream &os) const {
      if (!isSchedulingEntity()) {
        os << "/ " << *Inst;
      } else if (NextInBundle) {
        os << '[' << *Inst;
        ScheduleData *SD = NextInBundle;
        while (SD) {
          os << ';' << *SD->Inst;
          SD = SD->NextInBundle;
        }
        os << ']';
      } else {
        os << *Inst;
      }
    }
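    // Example output (illustrative): a non-head bundle member prints as
    // "/ <inst>", a bundle head prints as "[<inst>;<inst>;...]", and an
    // unbundled single instruction prints as "<inst>".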

    Instruction *Inst;

    /// Points to the head in an instruction bundle (and always to this for
    /// single instructions).
    ScheduleData *FirstInBundle;

    /// Single linked list of all instructions in a bundle. Null if it is a
    /// single instruction.
    ScheduleData *NextInBundle;

    /// Single linked list of all memory instructions (e.g. load, store, call)
    /// in the block - until the end of the scheduling region.
    ScheduleData *NextLoadStore;

    /// The dependent memory instructions.
    /// This list is derived on demand in calculateDependencies().
    SmallVector<ScheduleData *, 4> MemoryDependencies;

    /// This ScheduleData is in the current scheduling region if this matches
    /// the current SchedulingRegionID of BlockScheduling.
    int SchedulingRegionID;

    /// Used for getting a "good" final ordering of instructions.
    int SchedulingPriority;

    /// The number of dependencies. Consists of the number of users of the
    /// instruction plus the number of dependent memory instructions (if any).
    /// This value is calculated on demand.
    /// If InvalidDeps, the number of dependencies is not calculated yet.
    int Dependencies;

    /// The number of dependencies minus the number of dependencies of scheduled
    /// instructions. As soon as this is zero, the instruction/bundle gets ready
    /// for scheduling.
    /// Note that this is negative as long as Dependencies is not calculated.
    int UnscheduledDeps;

    /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for
    /// single instructions.
    int UnscheduledDepsInBundle;

    /// True if this instruction is scheduled (or considered as scheduled in the
    /// dry-run).
    bool IsScheduled;
  };

#ifndef NDEBUG
  friend inline raw_ostream &operator<<(raw_ostream &os,
                                        const BoUpSLP::ScheduleData &SD) {
    SD.dump(os);
    return os;
  }
#endif

  /// Contains all scheduling data for a basic block.
  struct BlockScheduling {

    BlockScheduling(BasicBlock *BB)
        : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize),
          ScheduleStart(nullptr), ScheduleEnd(nullptr),
          FirstLoadStoreInRegion(nullptr), LastLoadStoreInRegion(nullptr),
          ScheduleRegionSize(0),
          ScheduleRegionSizeLimit(ScheduleRegionSizeBudget),
          // Make sure that the initial SchedulingRegionID is greater than the
          // initial SchedulingRegionID in ScheduleData (which is 0).
          SchedulingRegionID(1) {}

    void clear() {
      ReadyInsts.clear();
      ScheduleStart = nullptr;
      ScheduleEnd = nullptr;
      FirstLoadStoreInRegion = nullptr;
      LastLoadStoreInRegion = nullptr;

      // Reduce the maximum schedule region size by the size of the
      // previous scheduling run.
      ScheduleRegionSizeLimit -= ScheduleRegionSize;
      if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
        ScheduleRegionSizeLimit = MinScheduleRegionSize;
      ScheduleRegionSize = 0;

      // Make a new scheduling region, i.e. all existing ScheduleData is not
      // in the new region yet.
      ++SchedulingRegionID;
    }

    ScheduleData *getScheduleData(Value *V) {
      ScheduleData *SD = ScheduleDataMap[V];
      if (SD && SD->SchedulingRegionID == SchedulingRegionID)
        return SD;
      return nullptr;
    }

    bool isInSchedulingRegion(ScheduleData *SD) {
      return SD->SchedulingRegionID == SchedulingRegionID;
    }

    /// Marks an instruction as scheduled and puts all dependent ready
    /// instructions into the ready-list.
    template <typename ReadyListType>
    void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
      SD->IsScheduled = true;
      DEBUG(dbgs() << "SLP:   schedule " << *SD << "\n");

      ScheduleData *BundleMember = SD;
      while (BundleMember) {
        // Handle the def-use chain dependencies.
        for (Use &U : BundleMember->Inst->operands()) {
          ScheduleData *OpDef = getScheduleData(U.get());
          if (OpDef && OpDef->hasValidDependencies() &&
              OpDef->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = OpDef->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP:    gets ready (def): " << *DepBundle << "\n");
          }
        }
        // Handle the memory dependencies.
        for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
          if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP:    gets ready (mem): " << *DepBundle << "\n");
          }
        }
        BundleMember = BundleMember->NextInBundle;
      }
    }

    /// Put all instructions into the ReadyList which are ready for scheduling.
    template <typename ReadyListType>
    void initialFillReadyList(ReadyListType &ReadyList) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
        ScheduleData *SD = getScheduleData(I);
        if (SD->isSchedulingEntity() && SD->isReady()) {
          ReadyList.insert(SD);
          DEBUG(dbgs() << "SLP:    initially in ready list: " << *I << "\n");
        }
      }
    }

    /// Checks if a bundle of instructions can be scheduled, i.e. has no
    /// cyclic dependencies. This is only a dry-run, no instructions are
    /// actually moved at this stage.
    bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP);

    /// Un-bundles a group of instructions.
    void cancelScheduling(ArrayRef<Value *> VL);

    /// Extends the scheduling region so that V is inside the region.
    /// \returns true if the region size is within the limit.
    bool extendSchedulingRegion(Value *V);

    /// Initialize the ScheduleData structures for new instructions in the
    /// scheduling region.
    void initScheduleData(Instruction *FromI, Instruction *ToI,
                          ScheduleData *PrevLoadStore,
                          ScheduleData *NextLoadStore);

    /// Updates the dependency information of a bundle and of all instructions/
    /// bundles which depend on the original bundle.
    void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
                               BoUpSLP *SLP);

    /// Sets all instructions in the scheduling region to un-scheduled.
    void resetSchedule();

    BasicBlock *BB;

    /// Simple memory allocation for ScheduleData.
    std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;

    /// The size of a ScheduleData array in ScheduleDataChunks.
    int ChunkSize;

    /// The allocator position in the current chunk, which is the last entry
    /// of ScheduleDataChunks.
    int ChunkPos;

    /// Attaches ScheduleData to Instruction.
    /// Note that the mapping survives during all vectorization iterations, i.e.
    /// ScheduleData structures are recycled.
    DenseMap<Value *, ScheduleData *> ScheduleDataMap;

    struct ReadyList : SmallVector<ScheduleData *, 8> {
      void insert(ScheduleData *SD) { push_back(SD); }
    };

    /// The ready-list for scheduling (only used for the dry-run).
    ReadyList ReadyInsts;

    /// The first instruction of the scheduling region.
    Instruction *ScheduleStart;

    /// The first instruction _after_ the scheduling region.
    Instruction *ScheduleEnd;

    /// The first memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *FirstLoadStoreInRegion;

    /// The last memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *LastLoadStoreInRegion;

    /// The current size of the scheduling region.
    int ScheduleRegionSize;

    /// The maximum size allowed for the scheduling region.
    int ScheduleRegionSizeLimit;

    /// The ID of the scheduling region. For a new vectorization iteration this
    /// is incremented which "removes" all ScheduleData from the region.
    int SchedulingRegionID;
  };

  /// Attaches the BlockScheduling structures to basic blocks.
  MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;

  /// Performs the "real" scheduling. Done before vectorization is actually
  /// performed in a basic block.
  void scheduleBlock(BlockScheduling *BS);

  /// List of users to ignore during scheduling and that don't need extracting.
  ArrayRef<Value *> UserIgnoreList;

  // Number of load bundles that contain consecutive loads.
  int NumLoadsWantToKeepOrder;

  // Number of load bundles that contain consecutive loads in reversed order.
  int NumLoadsWantToChangeOrder;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  AssumptionCache *AC;
  DemandedBits *DB;
  const DataLayout *DL;
  unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
  unsigned MinVecRegSize; // Set by cl::opt (default: 128).
  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;

  /// A map of scalar integer values to the smallest bit width with which they
  /// can legally be represented. The values map to (width, signed) pairs,
  /// where "width" indicates the minimum bit width and "signed" is True if the
  /// value must be signed-extended, rather than zero-extended, back to its
  /// original width.
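  /// For example (illustrative), an i32 value whose users only demand the
  /// low 8 bits, and which is zero-extended back to i32, would map to
  /// (8, false).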
  MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
};

} // end namespace slpvectorizer
} // end namespace llvm

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ArrayRef<Value *> UserIgnoreLst) {
  ExtraValueToDebugLocsMap ExternallyUsedValues;
  buildTree(Roots, ExternallyUsedValues, UserIgnoreLst);
}

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ExtraValueToDebugLocsMap &ExternallyUsedValues,
                        ArrayRef<Value *> UserIgnoreLst) {
  deleteTree();
  UserIgnoreList = UserIgnoreLst;
  if (!allSameType(Roots))
    return;
  buildTree_rec(Roots, 0);

  // Collect the values that we need to extract from the tree.
  for (TreeEntry &EIdx : VectorizableTree) {
    TreeEntry *Entry = &EIdx;

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      // Check if the scalar is externally used as an extra arg.
      auto ExtI = ExternallyUsedValues.find(Scalar);
      if (ExtI != ExternallyUsedValues.end()) {
        DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.emplace_back(Scalar, nullptr, Lane);
        continue;
      }
      for (User *U : Scalar->users()) {
        DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        Instruction *UserInst = dyn_cast<Instruction>(U);
        if (!UserInst)
          continue;

        // Skip in-tree scalars that become vectors
        if (ScalarToTreeEntry.count(U)) {
          int Idx = ScalarToTreeEntry[U];
          TreeEntry *UseEntry = &VectorizableTree[Idx];
          Value *UseScalar = UseEntry->Scalars[0];
          // Some in-tree scalars will remain as scalar in vectorized
          // instructions. If that is the case, the one in Lane 0 will
          // be used.
          if (UseScalar != U ||
              !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
            DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
                         << ".\n");
            assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
            continue;
          }
        }

        // Ignore users in the user ignore list.
        if (is_contained(UserIgnoreList, UserInst))
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
      }
    }
  }
}

void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
  bool isAltShuffle = false;
  assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false, false);
    return;
  }

  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false, false);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false, false);
      return;
    }

  unsigned Opcode = getSameOpcode(VL);

  // Check that this shuffle vector refers to the alternate
  // sequence of opcodes.
  if (Opcode == Instruction::ShuffleVector) {
    Instruction *I0 = dyn_cast<Instruction>(VL[0]);
    unsigned Op = I0->getOpcode();
    if (Op != Instruction::ShuffleVector)
      isAltShuffle = true;
  }

  // If all of the operands are identical or constant we have a simple solution.
  if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !Opcode) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O.\n");
    newTreeEntry(VL, false, false);
    return;
  }
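
  // Illustrative example (not from the source): VL = {%x, %x, %x, %x} is a
  // splat; building a tree for it is pointless because gathering it (which
  // typically lowers to a single broadcast) is already the cheaper option.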

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Don't vectorize ephemeral values.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (EphValues.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is ephemeral.\n");
      newTreeEntry(VL, false, false);
      return;
    }
  }

  // Check if this is a duplicate of another entry.
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false, false);
        return;
      }
    }
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is already in tree.\n");
      newTreeEntry(VL, false, false);
      return;
    }
  }

  // If any of the scalars is marked as a value that needs to stay scalar then
  // we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
      newTreeEntry(VL, false, false);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  BasicBlock *BB = VL0->getParent();

  if (!DT->isReachableFromEntry(BB)) {
    // Don't go into unreachable blocks. They may contain instructions with
    // dependency cycles which confuse the final scheduling.
    DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
    newTreeEntry(VL, false, false);
    return;
  }

  // Check that every instruction appears only once in this bundle.
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    for (unsigned j = i+1; j < e; ++j)
      if (VL[i] == VL[j]) {
        DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
        newTreeEntry(VL, false, false);
        return;
      }

  auto &BSRef = BlocksSchedules[BB];
  if (!BSRef) {
    BSRef = llvm::make_unique<BlockScheduling>(BB);
  }
  BlockScheduling &BS = *BSRef.get();

  if (!BS.tryScheduleBundle(VL, this)) {
    DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
    assert((!BS.getScheduleData(VL[0]) ||
            !BS.getScheduleData(VL[0])->isPartOfBundle()) &&
           "tryScheduleBundle should cancelScheduling on failure");
    newTreeEntry(VL, false, false);
    return;
  }
  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  switch (Opcode) {
    case Instruction::PHI: {
      PHINode *PH = dyn_cast<PHINode>(VL0);

      // Check for terminator values (e.g. invoke).
      for (unsigned j = 0; j < VL.size(); ++j)
        for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
          TerminatorInst *Term = dyn_cast<TerminatorInst>(
              cast<PHINode>(VL[j])->getIncomingValueForBlock(PH->getIncomingBlock(i)));
          if (Term) {
            DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
            BS.cancelScheduling(VL);
            newTreeEntry(VL, false, false);
            return;
          }
        }

      newTreeEntry(VL, true, false);
      DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL)
          Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock(
              PH->getIncomingBlock(i)));

        buildTree_rec(Operands, Depth + 1);
      }
      return;
    }
    case Instruction::ExtractValue:
    case Instruction::ExtractElement: {
      bool Reuse = canReuseExtract(VL, Opcode);
      if (Reuse) {
        DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
      } else {
        BS.cancelScheduling(VL);
      }
      newTreeEntry(VL, Reuse, false);
      return;
    }
    case Instruction::Load: {
      // Check that a vectorized load would load the same memory as a scalar
      // load. For example, we don't want to vectorize loads that are smaller
      // than 8 bit. Even though we have a packed struct {<i2, i2, i2, i2>}
      // LLVM treats loading/storing it as an i8 struct. If we vectorize
      // loads/stores from such a struct, we read/write packed bits
      // disagreeing with the unvectorized version.
      Type *ScalarTy = VL[0]->getType();

      if (DL->getTypeSizeInBits(ScalarTy) !=
          DL->getTypeAllocSizeInBits(ScalarTy)) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false, false);
        DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
        return;
      }

      // Make sure all loads in the bundle are simple - we can't vectorize
      // atomic or volatile loads.
      for (unsigned i = 0, e = VL.size(); i < e; ++i) {
        LoadInst *L = cast<LoadInst>(VL[i]);
        if (!L->isSimple()) {
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false, false);
          DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
          return;
        }
      }

      // Check if the loads are consecutive, reversed, or neither.
      bool Consecutive = true;
      bool ReverseConsecutive = true;
      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
        if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
          Consecutive = false;
          break;
        } else {
          ReverseConsecutive = false;
        }
      }
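
      // Illustrative example (not from the source): for loads of a[0..3] in
      // VL order {a[3], a[2], a[1], a[0]}, no forward pair is consecutive, so
      // Consecutive becomes false while ReverseConsecutive survives the loop
      // above and is then confirmed by the backward check below.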

      if (Consecutive) {
        ++NumLoadsWantToKeepOrder;
        newTreeEntry(VL, true, false);
        DEBUG(dbgs() << "SLP: added a vector of loads.\n");
        return;
      }

      // If none of the load pairs were consecutive when checked in order,
      // check the reverse order.
      if (ReverseConsecutive)
        for (unsigned i = VL.size() - 1; i > 0; --i)
          if (!isConsecutiveAccess(VL[i], VL[i - 1], *DL, *SE)) {
            ReverseConsecutive = false;
            break;
          }

      if (VL.size() > 2 && !ReverseConsecutive) {
        bool ShuffledLoads = true;
        SmallVector<Value *, 8> Sorted;
        if (sortMemAccesses(VL, *DL, *SE, Sorted)) {
          auto NewVL = makeArrayRef(Sorted.begin(), Sorted.end());
          for (unsigned i = 0, e = NewVL.size() - 1; i < e; ++i) {
            if (!isConsecutiveAccess(NewVL[i], NewVL[i + 1], *DL, *SE)) {
              ShuffledLoads = false;
              break;
            }
          }
          if (ShuffledLoads) {
            newTreeEntry(NewVL, true, true);
            return;
          }
        }
      }

      BS.cancelScheduling(VL);
      newTreeEntry(VL, false, false);

      if (ReverseConsecutive) {
        ++NumLoadsWantToChangeOrder;
        DEBUG(dbgs() << "SLP: Gathering reversed loads.\n");
      } else {
        DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
      }
      return;
    }
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      Type *SrcTy = VL0->getOperand(0)->getType();
      for (Value *Val : VL) {
        Type *Ty = cast<Instruction>(Val)->getOperand(0)->getType();
        if (Ty != SrcTy || !isValidElementType(Ty)) {
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false, false);
          DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
          return;
        }
      }
      newTreeEntry(VL, true, false);
      DEBUG(dbgs() << "SLP: added a vector of casts.\n");

      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));

        buildTree_rec(Operands, Depth+1);
      }
      return;
    }
    case Instruction::ICmp:
    case Instruction::FCmp: {
      // Check that all of the compares have the same predicate.
      CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
      Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
      for (unsigned i = 1, e = VL.size(); i < e; ++i) {
        CmpInst *Cmp = cast<CmpInst>(VL[i]);
        if (Cmp->getPredicate() != P0 ||
            Cmp->getOperand(0)->getType() != ComparedTy) {
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false, false);
          DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
          return;
        }
      }

      newTreeEntry(VL, true, false);
      DEBUG(dbgs() << "SLP: added a vector of compares.\n");

      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));

        buildTree_rec(Operands, Depth+1);
      }
      return;
    }
    case Instruction::Select:
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      newTreeEntry(VL, true, false);
      DEBUG(dbgs() << "SLP: added a vector of bin op.\n");

      // Sort operands of the instructions so that each side is more likely to
      // have the same opcode.
      if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
        ValueList Left, Right;
        reorderInputsAccordingToOpcode(VL, Left, Right);
        buildTree_rec(Left, Depth + 1);
        buildTree_rec(Right, Depth + 1);
        return;
      }

      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));

        buildTree_rec(Operands, Depth+1);
      }
      return;
    }
    case Instruction::GetElementPtr: {
      // We don't combine GEPs with complicated (nested) indexing.
      for (Value *Val : VL) {
        if (cast<Instruction>(Val)->getNumOperands() != 2) {
          DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false, false);
          return;
        }
      }

      // We can't combine several GEPs into one vector if they operate on
      // different types.
      Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
      for (Value *Val : VL) {
        Type *CurTy = cast<Instruction>(Val)->getOperand(0)->getType();
        if (Ty0 != CurTy) {
          DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false, false);
          return;
        }
      }

      // We don't combine GEPs with non-constant indexes.
      for (Value *Val : VL) {
        auto Op = cast<Instruction>(Val)->getOperand(1);
        if (!isa<ConstantInt>(Op)) {
          DEBUG(
              dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false, false);
          return;
        }
      }

      newTreeEntry(VL, true, false);
      DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
      for (unsigned i = 0, e = 2; i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));

        buildTree_rec(Operands, Depth + 1);
      }
      return;
    }
    case Instruction::Store: {
      // Check if the stores are consecutive or if we need to swizzle them.
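      // For illustration: stores to a[0], a[1], a[2], a[3], in that order,
      // are consecutive and can form one wide store; stores with a gap, such
      // as a[0] and a[2], are not.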
1440       for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
1441         if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
1442           BS.cancelScheduling(VL);
1443           newTreeEntry(VL, false, false);
1444           DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
1445           return;
1446         }
1447 
1448       newTreeEntry(VL, true, false);
1449       DEBUG(dbgs() << "SLP: added a vector of stores.\n");
1450 
1451       ValueList Operands;
1452       for (Value *j : VL)
1453         Operands.push_back(cast<Instruction>(j)->getOperand(0));
1454 
1455       buildTree_rec(Operands, Depth + 1);
1456       return;
1457     }
1458     case Instruction::Call: {
1459       // Check if the calls are all to the same vectorizable intrinsic.
1460       CallInst *CI = cast<CallInst>(VL[0]);
      // Check if this is an Intrinsic call or something that can be
      // represented by an intrinsic call.
1463       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
1464       if (!isTriviallyVectorizable(ID)) {
1465         BS.cancelScheduling(VL);
1466         newTreeEntry(VL, false, false);
1467         DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
1468         return;
1469       }
1470       Function *Int = CI->getCalledFunction();
1471       Value *A1I = nullptr;
1472       if (hasVectorInstrinsicScalarOpd(ID, 1))
1473         A1I = CI->getArgOperand(1);
1474       for (unsigned i = 1, e = VL.size(); i != e; ++i) {
1475         CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
1476         if (!CI2 || CI2->getCalledFunction() != Int ||
1477             getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
1478             !CI->hasIdenticalOperandBundleSchema(*CI2)) {
1479           BS.cancelScheduling(VL);
1480           newTreeEntry(VL, false, false);
1481           DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
1482                        << "\n");
1483           return;
1484         }
        // ctlz, cttz and powi are special intrinsics whose second argument
        // must be the same for them to be vectorized.
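        // For example (an illustrative sketch): powi(x0, 3), powi(x1, 3),
        // powi(x2, 3) and powi(x3, 3) can become one vector powi with a
        // shared i32 exponent, while lanes with different exponents, e.g.
        // powi(x0, 2) and powi(x1, 3), cannot.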
1487         if (hasVectorInstrinsicScalarOpd(ID, 1)) {
1488           Value *A1J = CI2->getArgOperand(1);
1489           if (A1I != A1J) {
1490             BS.cancelScheduling(VL);
1491             newTreeEntry(VL, false, false);
1492             DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
                         << " argument " << A1I << "!=" << A1J
1494                          << "\n");
1495             return;
1496           }
1497         }
1498         // Verify that the bundle operands are identical between the two calls.
1499         if (CI->hasOperandBundles() &&
1500             !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
1501                         CI->op_begin() + CI->getBundleOperandsEndIndex(),
1502                         CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
1503           BS.cancelScheduling(VL);
1504           newTreeEntry(VL, false, false);
1505           DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI << "!="
1506                        << *VL[i] << '\n');
1507           return;
1508         }
1509       }
1510 
1511       newTreeEntry(VL, true, false);
1512       for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
1513         ValueList Operands;
1514         // Prepare the operand vector.
1515         for (Value *j : VL) {
          CallInst *CI2 = cast<CallInst>(j);
1517           Operands.push_back(CI2->getArgOperand(i));
1518         }
1519         buildTree_rec(Operands, Depth + 1);
1520       }
1521       return;
1522     }
1523     case Instruction::ShuffleVector: {
      // If this is not an alternating sequence of opcodes like add-sub,
      // then do not vectorize this instruction.
1526       if (!isAltShuffle) {
1527         BS.cancelScheduling(VL);
1528         newTreeEntry(VL, false, false);
        DEBUG(dbgs() << "SLP: ShuffleVectors are not vectorized.\n");
1530         return;
1531       }
1532       newTreeEntry(VL, true, false);
1533       DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
1534 
1535       // Reorder operands if reordering would enable vectorization.
1536       if (isa<BinaryOperator>(VL0)) {
1537         ValueList Left, Right;
1538         reorderAltShuffleOperands(VL, Left, Right);
1539         buildTree_rec(Left, Depth + 1);
1540         buildTree_rec(Right, Depth + 1);
1541         return;
1542       }
1543 
1544       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1545         ValueList Operands;
1546         // Prepare the operand vector.
1547         for (Value *j : VL)
1548           Operands.push_back(cast<Instruction>(j)->getOperand(i));
1549 
1550         buildTree_rec(Operands, Depth + 1);
1551       }
1552       return;
1553     }
1554     default:
1555       BS.cancelScheduling(VL);
1556       newTreeEntry(VL, false, false);
1557       DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
1558       return;
1559   }
1560 }
1561 
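/// For example (an illustrative sketch, assuming the default 128-bit
/// register-size bounds): a homogeneous struct { i32, i32, i32, i32 } has the
/// same store size as <4 x i32>, so canMapToVector returns 4, while
/// { i32, i64 } fails the size and homogeneity checks and returns 0.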
1562 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
1563   unsigned N;
1564   Type *EltTy;
1565   auto *ST = dyn_cast<StructType>(T);
1566   if (ST) {
1567     N = ST->getNumElements();
1568     EltTy = *ST->element_begin();
1569   } else {
1570     N = cast<ArrayType>(T)->getNumElements();
1571     EltTy = cast<ArrayType>(T)->getElementType();
1572   }
1573   if (!isValidElementType(EltTy))
1574     return 0;
1575   uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N));
1576   if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T))
1577     return 0;
1578   if (ST) {
1579     // Check that struct is homogeneous.
1580     for (const auto *Ty : ST->elements())
1581       if (Ty != EltTy)
1582         return 0;
1583   }
1584   return N;
1585 }
1586 
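/// For illustration: four 'extractelement <4 x i32> %v' instructions with
/// indices 0..3, in lane order, all read from the same vector %v at matching
/// offsets, so %v can be reused directly and no gather is needed.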
1587 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const {
1588   assert(Opcode == Instruction::ExtractElement ||
1589          Opcode == Instruction::ExtractValue);
1590   assert(Opcode == getSameOpcode(VL) && "Invalid opcode");
1591   // Check if all of the extracts come from the same vector and from the
1592   // correct offset.
1593   Value *VL0 = VL[0];
1594   Instruction *E0 = cast<Instruction>(VL0);
1595   Value *Vec = E0->getOperand(0);
1596 
1597   // We have to extract from a vector/aggregate with the same number of elements.
1598   unsigned NElts;
1599   if (Opcode == Instruction::ExtractValue) {
1600     const DataLayout &DL = E0->getModule()->getDataLayout();
1601     NElts = canMapToVector(Vec->getType(), DL);
1602     if (!NElts)
1603       return false;
    // Check if the load can be rewritten as a load of a vector.
1605     LoadInst *LI = dyn_cast<LoadInst>(Vec);
1606     if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
1607       return false;
1608   } else {
1609     NElts = Vec->getType()->getVectorNumElements();
1610   }
1611 
1612   if (NElts != VL.size())
1613     return false;
1614 
1615   // Check that all of the indices extract from the correct offset.
1616   if (!matchExtractIndex(E0, 0, Opcode))
1617     return false;
1618 
1619   for (unsigned i = 1, e = VL.size(); i < e; ++i) {
1620     Instruction *E = cast<Instruction>(VL[i]);
1621     if (!matchExtractIndex(E, i, Opcode))
1622       return false;
1623     if (E->getOperand(0) != Vec)
1624       return false;
1625   }
1626 
1627   return true;
1628 }
1629 
1630 int BoUpSLP::getEntryCost(TreeEntry *E) {
1631   ArrayRef<Value*> VL = E->Scalars;
1632 
1633   Type *ScalarTy = VL[0]->getType();
1634   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
1635     ScalarTy = SI->getValueOperand()->getType();
1636   VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
1637 
1638   // If we have computed a smaller type for the expression, update VecTy so
1639   // that the costs will be accurate.
1640   if (MinBWs.count(VL[0]))
1641     VecTy = VectorType::get(
1642         IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size());
1643 
1644   if (E->NeedToGather) {
1645     if (allConstant(VL))
1646       return 0;
1647     if (isSplat(VL)) {
1648       return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
1649     }
1650     return getGatherCost(E->Scalars);
1651   }
1652   unsigned Opcode = getSameOpcode(VL);
1653   assert(Opcode && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
1654   Instruction *VL0 = cast<Instruction>(VL[0]);
1655   switch (Opcode) {
1656     case Instruction::PHI: {
1657       return 0;
1658     }
1659     case Instruction::ExtractValue:
1660     case Instruction::ExtractElement: {
1661       if (canReuseExtract(VL, Opcode)) {
1662         int DeadCost = 0;
1663         for (unsigned i = 0, e = VL.size(); i < e; ++i) {
1664           Instruction *E = cast<Instruction>(VL[i]);
          // If all users are going to be vectorized, the instruction can be
          // considered dead.
          // The same holds if it has only one use: that user will be
          // vectorized for sure.
1668           if (E->hasOneUse() ||
1669               std::all_of(E->user_begin(), E->user_end(), [this](User *U) {
1670                 return ScalarToTreeEntry.count(U) > 0;
1671               }))
            // Take credit for the instruction that will become dead.
1673             DeadCost +=
1674                 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
1675         }
1676         return -DeadCost;
1677       }
1678       return getGatherCost(VecTy);
1679     }
1680     case Instruction::ZExt:
1681     case Instruction::SExt:
1682     case Instruction::FPToUI:
1683     case Instruction::FPToSI:
1684     case Instruction::FPExt:
1685     case Instruction::PtrToInt:
1686     case Instruction::IntToPtr:
1687     case Instruction::SIToFP:
1688     case Instruction::UIToFP:
1689     case Instruction::Trunc:
1690     case Instruction::FPTrunc:
1691     case Instruction::BitCast: {
1692       Type *SrcTy = VL0->getOperand(0)->getType();
1693 
1694       // Calculate the cost of this instruction.
1695       int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
1696                                                          VL0->getType(), SrcTy);
1697 
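      // For illustration (actual numbers are target-dependent): for four
      // scalar 'sext i8 to i32' casts, ScalarCost above is four times the
      // scalar cast cost, while VecCost below is the cost of a single
      // 'sext <4 x i8> to <4 x i32>'.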
1698       VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
1699       int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
1700       return VecCost - ScalarCost;
1701     }
1702     case Instruction::FCmp:
1703     case Instruction::ICmp:
1704     case Instruction::Select: {
1705       // Calculate the cost of this instruction.
1706       VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
1707       int ScalarCost = VecTy->getNumElements() *
1708           TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
1709       int VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
1710       return VecCost - ScalarCost;
1711     }
1712     case Instruction::Add:
1713     case Instruction::FAdd:
1714     case Instruction::Sub:
1715     case Instruction::FSub:
1716     case Instruction::Mul:
1717     case Instruction::FMul:
1718     case Instruction::UDiv:
1719     case Instruction::SDiv:
1720     case Instruction::FDiv:
1721     case Instruction::URem:
1722     case Instruction::SRem:
1723     case Instruction::FRem:
1724     case Instruction::Shl:
1725     case Instruction::LShr:
1726     case Instruction::AShr:
1727     case Instruction::And:
1728     case Instruction::Or:
1729     case Instruction::Xor: {
1730       // Certain instructions can be cheaper to vectorize if they have a
1731       // constant second vector operand.
1732       TargetTransformInfo::OperandValueKind Op1VK =
1733           TargetTransformInfo::OK_AnyValue;
1734       TargetTransformInfo::OperandValueKind Op2VK =
1735           TargetTransformInfo::OK_UniformConstantValue;
1736       TargetTransformInfo::OperandValueProperties Op1VP =
1737           TargetTransformInfo::OP_None;
1738       TargetTransformInfo::OperandValueProperties Op2VP =
1739           TargetTransformInfo::OP_None;
1740 
1741       // If all operands are exactly the same ConstantInt then set the
1742       // operand kind to OK_UniformConstantValue.
1743       // If instead not all operands are constants, then set the operand kind
1744       // to OK_AnyValue. If all operands are constants but not the same,
1745       // then set the operand kind to OK_NonUniformConstantValue.
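      // For example (an illustrative sketch): in {x0 << 2, x1 << 2, x2 << 2,
      // x3 << 2} the second operand is OK_UniformConstantValue; in
      // {x0 << 1, x1 << 2, ...} it is OK_NonUniformConstantValue; and in
      // {x0 << y0, ...} it is OK_AnyValue.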
1746       ConstantInt *CInt = nullptr;
1747       for (unsigned i = 0; i < VL.size(); ++i) {
1748         const Instruction *I = cast<Instruction>(VL[i]);
1749         if (!isa<ConstantInt>(I->getOperand(1))) {
1750           Op2VK = TargetTransformInfo::OK_AnyValue;
1751           break;
1752         }
1753         if (i == 0) {
1754           CInt = cast<ConstantInt>(I->getOperand(1));
1755           continue;
1756         }
1757         if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
1758             CInt != cast<ConstantInt>(I->getOperand(1)))
1759           Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
1760       }
1761       // FIXME: Currently cost of model modification for division by power of
1762       // 2 is handled for X86 and AArch64. Add support for other targets.
1763       if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt &&
1764           CInt->getValue().isPowerOf2())
1765         Op2VP = TargetTransformInfo::OP_PowerOf2;
1766 
1767       int ScalarCost = VecTy->getNumElements() *
1768                        TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK,
1769                                                    Op2VK, Op1VP, Op2VP);
1770       int VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK,
1771                                                 Op1VP, Op2VP);
1772       return VecCost - ScalarCost;
1773     }
1774     case Instruction::GetElementPtr: {
1775       TargetTransformInfo::OperandValueKind Op1VK =
1776           TargetTransformInfo::OK_AnyValue;
1777       TargetTransformInfo::OperandValueKind Op2VK =
1778           TargetTransformInfo::OK_UniformConstantValue;
1779 
1780       int ScalarCost =
1781           VecTy->getNumElements() *
1782           TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
1783       int VecCost =
1784           TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);
1785 
1786       return VecCost - ScalarCost;
1787     }
1788     case Instruction::Load: {
1789       // Cost of wide load - cost of scalar loads.
      unsigned Alignment = cast<LoadInst>(VL0)->getAlignment();
      int ScalarLdCost = VecTy->getNumElements() *
            TTI->getMemoryOpCost(Instruction::Load, ScalarTy, Alignment, 0);
      int VecLdCost = TTI->getMemoryOpCost(Instruction::Load,
                                           VecTy, Alignment, 0);
1795       if (E->NeedToShuffle) {
1796         VecLdCost += TTI->getShuffleCost(
1797             TargetTransformInfo::SK_PermuteSingleSrc, VecTy, 0);
1798       }
1799       return VecLdCost - ScalarLdCost;
1800     }
1801     case Instruction::Store: {
1802       // We know that we can merge the stores. Calculate the cost.
      unsigned Alignment = cast<StoreInst>(VL0)->getAlignment();
      int ScalarStCost = VecTy->getNumElements() *
            TTI->getMemoryOpCost(Instruction::Store, ScalarTy, Alignment, 0);
      int VecStCost = TTI->getMemoryOpCost(Instruction::Store,
                                           VecTy, Alignment, 0);
1808       return VecStCost - ScalarStCost;
1809     }
1810     case Instruction::Call: {
1811       CallInst *CI = cast<CallInst>(VL0);
1812       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
1813 
1814       // Calculate the cost of the scalar and vector calls.
1815       SmallVector<Type*, 4> ScalarTys, VecTys;
      for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op) {
1817         ScalarTys.push_back(CI->getArgOperand(op)->getType());
1818         VecTys.push_back(VectorType::get(CI->getArgOperand(op)->getType(),
1819                                          VecTy->getNumElements()));
1820       }
1821 
1822       FastMathFlags FMF;
1823       if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
1824         FMF = FPMO->getFastMathFlags();
1825 
1826       int ScalarCallCost = VecTy->getNumElements() *
1827           TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF);
1828 
1829       int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys, FMF);
1830 
      DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
            << " (" << VecCallCost << "-" << ScalarCallCost << ")"
            << " for " << *CI << "\n");
1834 
1835       return VecCallCost - ScalarCallCost;
1836     }
1837     case Instruction::ShuffleVector: {
1838       TargetTransformInfo::OperandValueKind Op1VK =
1839           TargetTransformInfo::OK_AnyValue;
1840       TargetTransformInfo::OperandValueKind Op2VK =
1841           TargetTransformInfo::OK_AnyValue;
1842       int ScalarCost = 0;
1843       int VecCost = 0;
      for (Value *i : VL) {
        // cast<> never returns null, so no null check is needed here.
        Instruction *I = cast<Instruction>(i);
        ScalarCost +=
            TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
      }
1851       // VecCost is equal to sum of the cost of creating 2 vectors
1852       // and the cost of creating shuffle.
1853       Instruction *I0 = cast<Instruction>(VL[0]);
1854       VecCost =
1855           TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK);
1856       Instruction *I1 = cast<Instruction>(VL[1]);
1857       VecCost +=
1858           TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK);
1859       VecCost +=
1860           TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0);
1861       return VecCost - ScalarCost;
1862     }
1863     default:
1864       llvm_unreachable("Unknown instruction");
1865   }
1866 }
1867 
1868 bool BoUpSLP::isFullyVectorizableTinyTree() {
  DEBUG(dbgs() << "SLP: Check whether the tree with height "
               << VectorizableTree.size() << " is fully vectorizable.\n");
1871 
1872   // We only handle trees of heights 1 and 2.
1873   if (VectorizableTree.size() == 1 && !VectorizableTree[0].NeedToGather)
1874     return true;
1875 
1876   if (VectorizableTree.size() != 2)
1877     return false;
1878 
1879   // Handle splat and all-constants stores.
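  // For example (an illustrative sketch): a non-gather store bundle whose
  // value operands form the all-constant bundle {1, 1, 1, 1} is fully
  // vectorizable, since constants incur no gather cost.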
1880   if (!VectorizableTree[0].NeedToGather &&
1881       (allConstant(VectorizableTree[1].Scalars) ||
1882        isSplat(VectorizableTree[1].Scalars)))
1883     return true;
1884 
1885   // Gathering cost would be too much for tiny trees.
1886   if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
1887     return false;
1888 
1889   return true;
1890 }
1891 
1892 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() {
1893 
1894   // We can vectorize the tree if its size is greater than or equal to the
1895   // minimum size specified by the MinTreeSize command line option.
1896   if (VectorizableTree.size() >= MinTreeSize)
1897     return false;
1898 
1899   // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
1900   // can vectorize it if we can prove it fully vectorizable.
1901   if (isFullyVectorizableTinyTree())
1902     return false;
1903 
  assert((!VectorizableTree.empty() || ExternalUses.empty()) &&
         "We shouldn't have any external users");
1907 
1908   // Otherwise, we can't vectorize the tree. It is both tiny and not fully
1909   // vectorizable.
1910   return true;
1911 }
1912 
1913 int BoUpSLP::getSpillCost() {
1914   // Walk from the bottom of the tree to the top, tracking which values are
1915   // live. When we see a call instruction that is not part of our tree,
1916   // query TTI to see if there is a cost to keeping values live over it
1917   // (for example, if spills and fills are required).
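  // For example (an illustrative sketch): if a vector value is live across a
  // call to an external function, the callee may clobber vector registers,
  // so TTI may charge for a spill before the call and a fill after it.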
1918   unsigned BundleWidth = VectorizableTree.front().Scalars.size();
1919   int Cost = 0;
1920 
1921   SmallPtrSet<Instruction*, 4> LiveValues;
1922   Instruction *PrevInst = nullptr;
1923 
1924   for (const auto &N : VectorizableTree) {
1925     Instruction *Inst = dyn_cast<Instruction>(N.Scalars[0]);
1926     if (!Inst)
1927       continue;
1928 
1929     if (!PrevInst) {
1930       PrevInst = Inst;
1931       continue;
1932     }
1933 
1934     // Update LiveValues.
1935     LiveValues.erase(PrevInst);
1936     for (auto &J : PrevInst->operands()) {
1937       if (isa<Instruction>(&*J) && ScalarToTreeEntry.count(&*J))
1938         LiveValues.insert(cast<Instruction>(&*J));
1939     }
1940 
1941     DEBUG(
1942       dbgs() << "SLP: #LV: " << LiveValues.size();
1943       for (auto *X : LiveValues)
1944         dbgs() << " " << X->getName();
1945       dbgs() << ", Looking at ";
1946       Inst->dump();
1947       );
1948 
1949     // Now find the sequence of instructions between PrevInst and Inst.
1950     BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
1951                                  PrevInstIt =
1952                                      PrevInst->getIterator().getReverse();
1953     while (InstIt != PrevInstIt) {
1954       if (PrevInstIt == PrevInst->getParent()->rend()) {
1955         PrevInstIt = Inst->getParent()->rbegin();
1956         continue;
1957       }
1958 
1959       if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) {
1960         SmallVector<Type*, 4> V;
1961         for (auto *II : LiveValues)
1962           V.push_back(VectorType::get(II->getType(), BundleWidth));
1963         Cost += TTI->getCostOfKeepingLiveOverCall(V);
1964       }
1965 
1966       ++PrevInstIt;
1967     }
1968 
1969     PrevInst = Inst;
1970   }
1971 
1972   return Cost;
1973 }
1974 
1975 int BoUpSLP::getTreeCost() {
1976   int Cost = 0;
1977   DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
1978         VectorizableTree.size() << ".\n");
1979 
1980   unsigned BundleWidth = VectorizableTree[0].Scalars.size();
1981 
1982   for (TreeEntry &TE : VectorizableTree) {
1983     int C = getEntryCost(&TE);
1984     DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
1985                  << *TE.Scalars[0] << ".\n");
1986     Cost += C;
1987   }
1988 
1989   SmallSet<Value *, 16> ExtractCostCalculated;
1990   int ExtractCost = 0;
1991   for (ExternalUser &EU : ExternalUses) {
1992     // We only add extract cost once for the same scalar.
1993     if (!ExtractCostCalculated.insert(EU.Scalar).second)
1994       continue;
1995 
1996     // Uses by ephemeral values are free (because the ephemeral value will be
1997     // removed prior to code generation, and so the extraction will be
1998     // removed as well).
1999     if (EphValues.count(EU.User))
2000       continue;
2001 
2002     // If we plan to rewrite the tree in a smaller type, we will need to sign
2003     // extend the extracted value back to the original type. Here, we account
2004     // for the extract and the added cost of the sign extend if needed.
2005     auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth);
2006     auto *ScalarRoot = VectorizableTree[0].Scalars[0];
2007     if (MinBWs.count(ScalarRoot)) {
2008       auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
2009       auto Extend =
2010           MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
2011       VecTy = VectorType::get(MinTy, BundleWidth);
2012       ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
2013                                                    VecTy, EU.Lane);
2014     } else {
2015       ExtractCost +=
2016           TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
2017     }
2018   }
2019 
2020   int SpillCost = getSpillCost();
2021   Cost += SpillCost + ExtractCost;
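  // For example (an illustrative sketch): per-entry deltas of -4, -2 and +1
  // with ExtractCost 1 and SpillCost 0 give a total of -4; a negative total
  // means the vectorized form is expected to be cheaper than the scalar one.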
2022 
2023   DEBUG(dbgs() << "SLP: Spill Cost = " << SpillCost << ".\n"
2024                << "SLP: Extract Cost = " << ExtractCost << ".\n"
2025                << "SLP: Total Cost = " << Cost << ".\n");
2026   return Cost;
2027 }
2028 
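// For illustration: gathering scalars into a <4 x float> is modeled as the
// cost of four 'insertelement' instructions, one per lane.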
2029 int BoUpSLP::getGatherCost(Type *Ty) {
2030   int Cost = 0;
2031   for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
2032     Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
2033   return Cost;
2034 }
2035 
2036 int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
2037   // Find the type of the operands in VL.
2038   Type *ScalarTy = VL[0]->getType();
2039   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
2040     ScalarTy = SI->getValueOperand()->getType();
2041   VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
2042   // Find the cost of inserting/extracting values from the vector.
2043   return getGatherCost(VecTy);
2044 }
2045 
// Reorder commutative operations in an alternate shuffle if the resulting
// vectors would be consecutive loads. This would allow us to vectorize the
// tree. If we have something like:
//   load a[0] - load b[0]
//   load b[1] + load a[1]
//   load a[2] - load b[2]
//   load a[3] + load b[3]
// then reordering the second pair (load b[1], load a[1]) allows us to
// vectorize this code.
2055 void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL,
2056                                         SmallVectorImpl<Value *> &Left,
2057                                         SmallVectorImpl<Value *> &Right) {
2058   // Push left and right operands of binary operation into Left and Right
2059   for (Value *i : VL) {
2060     Left.push_back(cast<Instruction>(i)->getOperand(0));
2061     Right.push_back(cast<Instruction>(i)->getOperand(1));
2062   }
2063 
2064   // Reorder if we have a commutative operation and consecutive access
2065   // are on either side of the alternate instructions.
2066   for (unsigned j = 0; j < VL.size() - 1; ++j) {
2067     if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
2068       if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
2069         Instruction *VL1 = cast<Instruction>(VL[j]);
2070         Instruction *VL2 = cast<Instruction>(VL[j + 1]);
2071         if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
2072           std::swap(Left[j], Right[j]);
2073           continue;
2074         } else if (VL2->isCommutative() &&
2075                    isConsecutiveAccess(L, L1, *DL, *SE)) {
2076           std::swap(Left[j + 1], Right[j + 1]);
2077           continue;
2078         }
2079         // else unchanged
2080       }
2081     }
2082     if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
2083       if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
2084         Instruction *VL1 = cast<Instruction>(VL[j]);
2085         Instruction *VL2 = cast<Instruction>(VL[j + 1]);
2086         if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
2087           std::swap(Left[j], Right[j]);
2088           continue;
2089         } else if (VL2->isCommutative() &&
2090                    isConsecutiveAccess(L, L1, *DL, *SE)) {
2091           std::swap(Left[j + 1], Right[j + 1]);
2092           continue;
2093         }
2094         // else unchanged
2095       }
2096     }
2097   }
2098 }
2099 
// Return true if I should be commuted before adding its left and right
// operands to the arrays Left and Right.
//
// The vectorizer is trying either to have all elements on one side be
// instructions with the same opcode, to enable further vectorization, or to
// have a splat, to lower the vectorization cost.
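//
// For example (an illustrative sketch): with VL = { a + b, a + c, a + d },
// keeping 'a' on the left in every lane preserves SplatLeft, so no lane is
// commuted.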
2106 static bool shouldReorderOperands(int i, Instruction &I,
2107                                   SmallVectorImpl<Value *> &Left,
2108                                   SmallVectorImpl<Value *> &Right,
2109                                   bool AllSameOpcodeLeft,
2110                                   bool AllSameOpcodeRight, bool SplatLeft,
2111                                   bool SplatRight) {
2112   Value *VLeft = I.getOperand(0);
2113   Value *VRight = I.getOperand(1);
2114   // If we have "SplatRight", try to see if commuting is needed to preserve it.
2115   if (SplatRight) {
2116     if (VRight == Right[i - 1])
2117       // Preserve SplatRight
2118       return false;
2119     if (VLeft == Right[i - 1]) {
2120       // Commuting would preserve SplatRight, but we don't want to break
2121       // SplatLeft either, i.e. preserve the original order if possible.
2122       // (FIXME: why do we care?)
2123       if (SplatLeft && VLeft == Left[i - 1])
2124         return false;
2125       return true;
2126     }
2127   }
  // Symmetrically handle SplatLeft.
2129   if (SplatLeft) {
2130     if (VLeft == Left[i - 1])
2131       // Preserve SplatLeft
2132       return false;
2133     if (VRight == Left[i - 1])
2134       return true;
2135   }
2136 
2137   Instruction *ILeft = dyn_cast<Instruction>(VLeft);
2138   Instruction *IRight = dyn_cast<Instruction>(VRight);
2139 
  // If we have "AllSameOpcodeRight", check whether the left operand would
  // preserve it while the right one would not; in that case we want to
  // commute.
2142   if (AllSameOpcodeRight) {
2143     unsigned RightPrevOpcode = cast<Instruction>(Right[i - 1])->getOpcode();
2144     if (IRight && RightPrevOpcode == IRight->getOpcode())
2145       // Do not commute, a match on the right preserves AllSameOpcodeRight
2146       return false;
2147     if (ILeft && RightPrevOpcode == ILeft->getOpcode()) {
2148       // We have a match and may want to commute, but first check if there is
2149       // not also a match on the existing operands on the Left to preserve
2150       // AllSameOpcodeLeft, i.e. preserve the original order if possible.
2151       // (FIXME: why do we care?)
2152       if (AllSameOpcodeLeft && ILeft &&
2153           cast<Instruction>(Left[i - 1])->getOpcode() == ILeft->getOpcode())
2154         return false;
2155       return true;
2156     }
2157   }
  // Symmetrically handle AllSameOpcodeLeft.
2159   if (AllSameOpcodeLeft) {
2160     unsigned LeftPrevOpcode = cast<Instruction>(Left[i - 1])->getOpcode();
2161     if (ILeft && LeftPrevOpcode == ILeft->getOpcode())
2162       return false;
2163     if (IRight && LeftPrevOpcode == IRight->getOpcode())
2164       return true;
2165   }
2166   return false;
2167 }
2168 
2169 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
2170                                              SmallVectorImpl<Value *> &Left,
2171                                              SmallVectorImpl<Value *> &Right) {
2172 
2173   if (VL.size()) {
2174     // Peel the first iteration out of the loop since there's nothing
2175     // interesting to do anyway and it simplifies the checks in the loop.
2176     auto VLeft = cast<Instruction>(VL[0])->getOperand(0);
2177     auto VRight = cast<Instruction>(VL[0])->getOperand(1);
2178     if (!isa<Instruction>(VRight) && isa<Instruction>(VLeft))
      // Favor having an instruction to the right. FIXME: why?
2180       std::swap(VLeft, VRight);
2181     Left.push_back(VLeft);
2182     Right.push_back(VRight);
2183   }
2184 
2185   // Keep track if we have instructions with all the same opcode on one side.
2186   bool AllSameOpcodeLeft = isa<Instruction>(Left[0]);
2187   bool AllSameOpcodeRight = isa<Instruction>(Right[0]);
2188   // Keep track if we have one side with all the same value (broadcast).
2189   bool SplatLeft = true;
2190   bool SplatRight = true;
2191 
2192   for (unsigned i = 1, e = VL.size(); i != e; ++i) {
2193     Instruction *I = cast<Instruction>(VL[i]);
2194     assert(I->isCommutative() && "Can only process commutative instruction");
2195     // Commute to favor either a splat or maximizing having the same opcodes on
2196     // one side.
2197     if (shouldReorderOperands(i, *I, Left, Right, AllSameOpcodeLeft,
2198                               AllSameOpcodeRight, SplatLeft, SplatRight)) {
2199       Left.push_back(I->getOperand(1));
2200       Right.push_back(I->getOperand(0));
2201     } else {
2202       Left.push_back(I->getOperand(0));
2203       Right.push_back(I->getOperand(1));
2204     }
2205     // Update Splat* and AllSameOpcode* after the insertion.
2206     SplatRight = SplatRight && (Right[i - 1] == Right[i]);
2207     SplatLeft = SplatLeft && (Left[i - 1] == Left[i]);
2208     AllSameOpcodeLeft = AllSameOpcodeLeft && isa<Instruction>(Left[i]) &&
2209                         (cast<Instruction>(Left[i - 1])->getOpcode() ==
2210                          cast<Instruction>(Left[i])->getOpcode());
2211     AllSameOpcodeRight = AllSameOpcodeRight && isa<Instruction>(Right[i]) &&
2212                          (cast<Instruction>(Right[i - 1])->getOpcode() ==
2213                           cast<Instruction>(Right[i])->getOpcode());
2214   }
2215 
  // If one operand ends up being a broadcast, return with this operand order.
2217   if (SplatRight || SplatLeft)
2218     return;
2219 
  // Finally, check if we can get a longer vectorizable chain by reordering
  // without breaking the good operand order detected above.
  // E.g., if we have something like:
  //   load a[0]  load b[0]
  //   load b[1]  load a[1]
  //   load a[2]  load b[2]
  //   load a[3]  load b[3]
  // then reordering the second pair (load b[1], load a[1]) allows us to
  // vectorize this code while still retaining the AllSameOpcode property.
  // FIXME: This load reordering might break AllSameOpcode in some rare cases
  // such as:
  //   add a[0],c[0]  load b[0]
  //   add a[1],c[2]  load b[1]
  //   b[2]           load b[2]
  //   add a[3],c[3]  load b[3]
2235   for (unsigned j = 0; j < VL.size() - 1; ++j) {
2236     if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
2237       if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
2238         if (isConsecutiveAccess(L, L1, *DL, *SE)) {
2239           std::swap(Left[j + 1], Right[j + 1]);
2240           continue;
2241         }
2242       }
2243     }
2244     if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
2245       if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
2246         if (isConsecutiveAccess(L, L1, *DL, *SE)) {
2247           std::swap(Left[j + 1], Right[j + 1]);
2248           continue;
2249         }
2250       }
2251     }
2252     // else unchanged
2253   }
2254 }
2255 
2256 void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
2257 
2258   // Get the basic block this bundle is in. All instructions in the bundle
2259   // should be in this block.
2260   auto *Front = cast<Instruction>(VL.front());
2261   auto *BB = Front->getParent();
  assert(all_of(VL, [&](Value *V) -> bool {
    return cast<Instruction>(V)->getParent() == BB;
  }));
2265 
2266   // The last instruction in the bundle in program order.
2267   Instruction *LastInst = nullptr;
2268 
2269   // Find the last instruction. The common case should be that BB has been
2270   // scheduled, and the last instruction is VL.back(). So we start with
2271   // VL.back() and iterate over schedule data until we reach the end of the
2272   // bundle. The end of the bundle is marked by null ScheduleData.
2273   if (BlocksSchedules.count(BB)) {
2274     auto *Bundle = BlocksSchedules[BB]->getScheduleData(VL.back());
2275     if (Bundle && Bundle->isPartOfBundle())
2276       for (; Bundle; Bundle = Bundle->NextInBundle)
2277         LastInst = Bundle->Inst;
2278   }
2279 
2280   // LastInst can still be null at this point if there's either not an entry
2281   // for BB in BlocksSchedules or there's no ScheduleData available for
2282   // VL.back(). This can be the case if buildTree_rec aborts for various
2283   // reasons (e.g., the maximum recursion depth is reached, the maximum region
2284   // size is reached, etc.). ScheduleData is initialized in the scheduling
2285   // "dry-run".
2286   //
2287   // If this happens, we can still find the last instruction by brute force. We
2288   // iterate forwards from Front (inclusive) until we either see all
2289   // instructions in the bundle or reach the end of the block. If Front is the
2290   // last instruction in program order, LastInst will be set to Front, and we
2291   // will visit all the remaining instructions in the block.
2292   //
2293   // One of the reasons we exit early from buildTree_rec is to place an upper
2294   // bound on compile-time. Thus, taking an additional compile-time hit here is
2295   // not ideal. However, this should be exceedingly rare since it requires that
2296   // we both exit early from buildTree_rec and that the bundle be out-of-order
2297   // (causing us to iterate all the way to the end of the block).
2298   if (!LastInst) {
2299     SmallPtrSet<Value *, 16> Bundle(VL.begin(), VL.end());
2300     for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
2301       if (Bundle.erase(&I))
2302         LastInst = &I;
2303       if (Bundle.empty())
2304         break;
2305     }
2306   }
2307 
2308   // Set the insertion point after the last instruction in the bundle. Set the
2309   // debug location to Front.
2310   Builder.SetInsertPoint(BB, ++LastInst->getIterator());
2311   Builder.SetCurrentDebugLocation(Front->getDebugLoc());
2312 }
2313 
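// For illustration: Gather({%a, %b}, <2 x i32>) emits
//   %v.0 = insertelement <2 x i32> undef, i32 %a, i32 0
//   %v.1 = insertelement <2 x i32> %v.0, i32 %b, i32 1
// and returns %v.1.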
2314 Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
2315   Value *Vec = UndefValue::get(Ty);
2316   // Generate the 'InsertElement' instruction.
2317   for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
2318     Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
2319     if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
2320       GatherSeq.insert(Insrt);
2321       CSEBlocks.insert(Insrt->getParent());
2322 
2323       // Add to our 'need-to-extract' list.
2324       if (ScalarToTreeEntry.count(VL[i])) {
2325         int Idx = ScalarToTreeEntry[VL[i]];
2326         TreeEntry *E = &VectorizableTree[Idx];
2327         // Find which lane we need to extract.
2328         int FoundLane = -1;
2329         for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for?
2331           if (E->Scalars[Lane] == VL[i]) {
2332             FoundLane = Lane;
2333             break;
2334           }
2335         }
2336         assert(FoundLane >= 0 && "Could not find the correct lane");
2337         ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
2338       }
2339     }
2340   }
2341 
2342   return Vec;
2343 }
2344 
2345 Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
2346   SmallDenseMap<Value*, int>::const_iterator Entry
2347     = ScalarToTreeEntry.find(VL[0]);
2348   if (Entry != ScalarToTreeEntry.end()) {
2349     int Idx = Entry->second;
2350     const TreeEntry *En = &VectorizableTree[Idx];
2351     if (En->isSame(VL) && En->VectorizedValue)
2352       return En->VectorizedValue;
2353   }
2354   return nullptr;
2355 }
2356 
2357 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
2358   if (ScalarToTreeEntry.count(VL[0])) {
2359     int Idx = ScalarToTreeEntry[VL[0]];
2360     TreeEntry *E = &VectorizableTree[Idx];
2361     if (E->isSame(VL) || (E->NeedToShuffle && E->isFoundJumbled(VL, *DL, *SE)))
2362       return vectorizeTree(VL, E);
2363   }
2364 
2365   Type *ScalarTy = VL[0]->getType();
2366   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
2367     ScalarTy = SI->getValueOperand()->getType();
2368   VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
2369 
2370   return Gather(VL, VecTy);
2371 }
2372 
2373 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL, TreeEntry *E) {
2374   IRBuilder<>::InsertPointGuard Guard(Builder);
2375 
2376   if (E->VectorizedValue && !E->NeedToShuffle) {
2377     DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
2378     return E->VectorizedValue;
2379   }
2380 
2381   Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
2382   Type *ScalarTy = VL0->getType();
2383   if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
2384     ScalarTy = SI->getValueOperand()->getType();
2385   VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());
2386 
2387   if (E->NeedToGather) {
2388     setInsertPointAfterBundle(E->Scalars);
2389     auto *V = Gather(E->Scalars, VecTy);
2390     E->VectorizedValue = V;
2391     return V;
2392   }
2393 
2394   unsigned Opcode = getSameOpcode(E->Scalars);
2395 
2396   switch (Opcode) {
2397     case Instruction::PHI: {
      PHINode *PH = cast<PHINode>(VL0);
2399       Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
2400       Builder.SetCurrentDebugLocation(PH->getDebugLoc());
2401       PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
2402       E->VectorizedValue = NewPhi;
2403 
2404       // PHINodes may have multiple entries from the same block. We want to
2405       // visit every block once.
2406       SmallSet<BasicBlock*, 4> VisitedBBs;
2407 
2408       for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
2409         ValueList Operands;
2410         BasicBlock *IBB = PH->getIncomingBlock(i);
2411 
2412         if (!VisitedBBs.insert(IBB).second) {
2413           NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
2414           continue;
2415         }
2416 
2417         // Prepare the operand vector.
2418         for (Value *V : E->Scalars)
2419           Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(IBB));
2420 
2421         Builder.SetInsertPoint(IBB->getTerminator());
2422         Builder.SetCurrentDebugLocation(PH->getDebugLoc());
2423         Value *Vec = vectorizeTree(Operands);
2424         NewPhi->addIncoming(Vec, IBB);
2425       }
2426 
2427       assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
2428              "Invalid number of incoming values");
2429       return NewPhi;
2430     }
2431 
2432     case Instruction::ExtractElement: {
2433       if (canReuseExtract(E->Scalars, Instruction::ExtractElement)) {
2434         Value *V = VL0->getOperand(0);
2435         E->VectorizedValue = V;
2436         return V;
2437       }
2438       setInsertPointAfterBundle(E->Scalars);
2439       auto *V = Gather(E->Scalars, VecTy);
2440       E->VectorizedValue = V;
2441       return V;
2442     }
2443     case Instruction::ExtractValue: {
2444       if (canReuseExtract(E->Scalars, Instruction::ExtractValue)) {
2445         LoadInst *LI = cast<LoadInst>(VL0->getOperand(0));
2446         Builder.SetInsertPoint(LI);
2447         PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
2448         Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
2449         LoadInst *V = Builder.CreateAlignedLoad(Ptr, LI->getAlignment());
2450         E->VectorizedValue = V;
2451         return propagateMetadata(V, E->Scalars);
2452       }
2453       setInsertPointAfterBundle(E->Scalars);
2454       auto *V = Gather(E->Scalars, VecTy);
2455       E->VectorizedValue = V;
2456       return V;
2457     }
2458     case Instruction::ZExt:
2459     case Instruction::SExt:
2460     case Instruction::FPToUI:
2461     case Instruction::FPToSI:
2462     case Instruction::FPExt:
2463     case Instruction::PtrToInt:
2464     case Instruction::IntToPtr:
2465     case Instruction::SIToFP:
2466     case Instruction::UIToFP:
2467     case Instruction::Trunc:
2468     case Instruction::FPTrunc:
2469     case Instruction::BitCast: {
2470       ValueList INVL;
2471       for (Value *V : E->Scalars)
2472         INVL.push_back(cast<Instruction>(V)->getOperand(0));
2473 
2474       setInsertPointAfterBundle(E->Scalars);
2475 
2476       Value *InVec = vectorizeTree(INVL);
2477 
2478       if (Value *V = alreadyVectorized(E->Scalars))
2479         return V;
2480 
      CastInst *CI = cast<CastInst>(VL0);
2482       Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
2483       E->VectorizedValue = V;
2484       ++NumVectorInstructions;
2485       return V;
2486     }
2487     case Instruction::FCmp:
2488     case Instruction::ICmp: {
2489       ValueList LHSV, RHSV;
2490       for (Value *V : E->Scalars) {
2491         LHSV.push_back(cast<Instruction>(V)->getOperand(0));
2492         RHSV.push_back(cast<Instruction>(V)->getOperand(1));
2493       }
2494 
2495       setInsertPointAfterBundle(E->Scalars);
2496 
2497       Value *L = vectorizeTree(LHSV);
2498       Value *R = vectorizeTree(RHSV);
2499 
2500       if (Value *V = alreadyVectorized(E->Scalars))
2501         return V;
2502 
2503       CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
2504       Value *V;
2505       if (Opcode == Instruction::FCmp)
2506         V = Builder.CreateFCmp(P0, L, R);
2507       else
2508         V = Builder.CreateICmp(P0, L, R);
2509 
2510       E->VectorizedValue = V;
2511       propagateIRFlags(E->VectorizedValue, E->Scalars);
2512       ++NumVectorInstructions;
2513       return V;
2514     }
2515     case Instruction::Select: {
2516       ValueList TrueVec, FalseVec, CondVec;
2517       for (Value *V : E->Scalars) {
2518         CondVec.push_back(cast<Instruction>(V)->getOperand(0));
2519         TrueVec.push_back(cast<Instruction>(V)->getOperand(1));
2520         FalseVec.push_back(cast<Instruction>(V)->getOperand(2));
2521       }
2522 
2523       setInsertPointAfterBundle(E->Scalars);
2524 
2525       Value *Cond = vectorizeTree(CondVec);
2526       Value *True = vectorizeTree(TrueVec);
2527       Value *False = vectorizeTree(FalseVec);
2528 
2529       if (Value *V = alreadyVectorized(E->Scalars))
2530         return V;
2531 
2532       Value *V = Builder.CreateSelect(Cond, True, False);
2533       E->VectorizedValue = V;
2534       ++NumVectorInstructions;
2535       return V;
2536     }
2537     case Instruction::Add:
2538     case Instruction::FAdd:
2539     case Instruction::Sub:
2540     case Instruction::FSub:
2541     case Instruction::Mul:
2542     case Instruction::FMul:
2543     case Instruction::UDiv:
2544     case Instruction::SDiv:
2545     case Instruction::FDiv:
2546     case Instruction::URem:
2547     case Instruction::SRem:
2548     case Instruction::FRem:
2549     case Instruction::Shl:
2550     case Instruction::LShr:
2551     case Instruction::AShr:
2552     case Instruction::And:
2553     case Instruction::Or:
2554     case Instruction::Xor: {
2555       ValueList LHSVL, RHSVL;
2556       if (isa<BinaryOperator>(VL0) && VL0->isCommutative())
2557         reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL);
2558       else
2559         for (Value *V : E->Scalars) {
2560           LHSVL.push_back(cast<Instruction>(V)->getOperand(0));
2561           RHSVL.push_back(cast<Instruction>(V)->getOperand(1));
2562         }
2563 
2564       setInsertPointAfterBundle(E->Scalars);
2565 
2566       Value *LHS = vectorizeTree(LHSVL);
2567       Value *RHS = vectorizeTree(RHSVL);
2568 
2569       if (Value *V = alreadyVectorized(E->Scalars))
2570         return V;
2571 
2572       BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
2573       Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
2574       E->VectorizedValue = V;
2575       propagateIRFlags(E->VectorizedValue, E->Scalars);
2576       ++NumVectorInstructions;
2577 
2578       if (Instruction *I = dyn_cast<Instruction>(V))
2579         return propagateMetadata(I, E->Scalars);
2580 
2581       return V;
2582     }
2583     case Instruction::Load: {
2584       // Loads are inserted at the head of the tree because we don't want to
2585       // sink them all the way down past store instructions.
2586       setInsertPointAfterBundle(E->Scalars);
2587 
2588       LoadInst *LI = cast<LoadInst>(VL0);
2589       Type *ScalarLoadTy = LI->getType();
2590       unsigned AS = LI->getPointerAddressSpace();
2591 
2592       Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
2593                                             VecTy->getPointerTo(AS));
2594 
      // The pointer operand uses an in-tree scalar, so we add the new BitCast
      // to the ExternalUses list to make sure that an extract will be
      // generated in the future.
2598       if (ScalarToTreeEntry.count(LI->getPointerOperand()))
2599         ExternalUses.push_back(
2600             ExternalUser(LI->getPointerOperand(), cast<User>(VecPtr), 0));
2601 
2602       unsigned Alignment = LI->getAlignment();
2603       LI = Builder.CreateLoad(VecPtr);
2604       if (!Alignment) {
2605         Alignment = DL->getABITypeAlignment(ScalarLoadTy);
2606       }
2607       LI->setAlignment(Alignment);
2608       E->VectorizedValue = LI;
2609       ++NumVectorInstructions;
2610       propagateMetadata(LI, E->Scalars);
2611 
      // As the program order of the scalar loads is jumbled, the vectorized
      // 'load' must be followed by a 'shuffle' with the required jumble mask.
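      // For example (an illustrative sketch): if the tree entry holds the
      // loads in memory order {a[0], a[1], a[2], a[3]} but VL requests
      // {a[1], a[0], a[3], a[2]}, the mask built below is <1, 0, 3, 2>.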
      if (!VL.empty() && E->NeedToShuffle) {
2615         assert(VL.size() == E->Scalars.size() &&
2616                "Equal number of scalars expected");
2617         SmallVector<Constant *, 8> Mask;
2618         for (Value *Val : VL) {
          if (ScalarToTreeEntry.count(Val)) {
            int Idx = ScalarToTreeEntry[Val];
            TreeEntry *TE = &VectorizableTree[Idx];
            for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
              if (TE->Scalars[Lane] == Val) {
2624                 Mask.push_back(Builder.getInt32(Lane));
2625                 break;
2626               }
2627             }
2628           }
2629         }
2630 
        // Generate a shuffle for the jumbled memory accesses.
2632         Value *Undef = UndefValue::get(VecTy);
2633         Value *Shuf = Builder.CreateShuffleVector((Value *)LI, Undef,
2634                                                   ConstantVector::get(Mask));
2635         return Shuf;
2636       }
2637 
2638       return LI;
2639     }
2640     case Instruction::Store: {
2641       StoreInst *SI = cast<StoreInst>(VL0);
2642       unsigned Alignment = SI->getAlignment();
2643       unsigned AS = SI->getPointerAddressSpace();
2644 
2645       ValueList ValueOp;
2646       for (Value *V : E->Scalars)
2647         ValueOp.push_back(cast<StoreInst>(V)->getValueOperand());
2648 
2649       setInsertPointAfterBundle(E->Scalars);
2650 
2651       Value *VecValue = vectorizeTree(ValueOp);
2652       Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
2653                                             VecTy->getPointerTo(AS));
2654       StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
2655 
      // The pointer operand uses an in-tree scalar, so we add the new BitCast
      // to the ExternalUses list to make sure that an extract will be
      // generated in the future.
2659       if (ScalarToTreeEntry.count(SI->getPointerOperand()))
2660         ExternalUses.push_back(
2661             ExternalUser(SI->getPointerOperand(), cast<User>(VecPtr), 0));
2662 
2663       if (!Alignment) {
2664         Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType());
2665       }
2666       S->setAlignment(Alignment);
2667       E->VectorizedValue = S;
2668       ++NumVectorInstructions;
2669       return propagateMetadata(S, E->Scalars);
2670     }
2671     case Instruction::GetElementPtr: {
2672       setInsertPointAfterBundle(E->Scalars);
2673 
2674       ValueList Op0VL;
2675       for (Value *V : E->Scalars)
2676         Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0));
2677 
2678       Value *Op0 = vectorizeTree(Op0VL);
2679 
2680       std::vector<Value *> OpVecs;
2681       for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
2682            ++j) {
2683         ValueList OpVL;
2684         for (Value *V : E->Scalars)
2685           OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j));
2686 
2687         Value *OpVec = vectorizeTree(OpVL);
2688         OpVecs.push_back(OpVec);
2689       }
2690 
2691       Value *V = Builder.CreateGEP(
2692           cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs);
2693       E->VectorizedValue = V;
2694       ++NumVectorInstructions;
2695 
2696       if (Instruction *I = dyn_cast<Instruction>(V))
2697         return propagateMetadata(I, E->Scalars);
2698 
2699       return V;
2700     }
2701     case Instruction::Call: {
2702       CallInst *CI = cast<CallInst>(VL0);
2703       setInsertPointAfterBundle(E->Scalars);
      Function *FI;
      Intrinsic::ID IID = Intrinsic::not_intrinsic;
      Value *ScalarArg = nullptr;
      if ((FI = CI->getCalledFunction())) {
        IID = FI->getIntrinsicID();
      }
2710       std::vector<Value *> OpVecs;
2711       for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
2712         ValueList OpVL;
2713         // ctlz,cttz and powi are special intrinsics whose second argument is
2714         // a scalar. This argument should not be vectorized.
2715         if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) {
2716           CallInst *CEI = cast<CallInst>(E->Scalars[0]);
2717           ScalarArg = CEI->getArgOperand(j);
2718           OpVecs.push_back(CEI->getArgOperand(j));
2719           continue;
2720         }
2721         for (Value *V : E->Scalars) {
2722           CallInst *CEI = cast<CallInst>(V);
2723           OpVL.push_back(CEI->getArgOperand(j));
2724         }
2725 
2726         Value *OpVec = vectorizeTree(OpVL);
2727         DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
2728         OpVecs.push_back(OpVec);
2729       }
2730 
2731       Module *M = F->getParent();
2732       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
2733       Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) };
2734       Function *CF = Intrinsic::getDeclaration(M, ID, Tys);
2735       SmallVector<OperandBundleDef, 1> OpBundles;
2736       CI->getOperandBundlesAsDefs(OpBundles);
2737       Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);
2738 
      // The scalar argument uses an in-tree scalar, so we add the new
      // vectorized call to the ExternalUses list to make sure that an extract
      // will be generated in the future.
2742       if (ScalarArg && ScalarToTreeEntry.count(ScalarArg))
2743         ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));
2744 
2745       E->VectorizedValue = V;
2746       propagateIRFlags(E->VectorizedValue, E->Scalars);
2747       ++NumVectorInstructions;
2748       return V;
2749     }
2750     case Instruction::ShuffleVector: {
2751       ValueList LHSVL, RHSVL;
2752       assert(isa<BinaryOperator>(VL0) && "Invalid Shuffle Vector Operand");
2753       reorderAltShuffleOperands(E->Scalars, LHSVL, RHSVL);
2754       setInsertPointAfterBundle(E->Scalars);
2755 
2756       Value *LHS = vectorizeTree(LHSVL);
2757       Value *RHS = vectorizeTree(RHSVL);
2758 
2759       if (Value *V = alreadyVectorized(E->Scalars))
2760         return V;
2761 
2762       // Create a vector of LHS op1 RHS
2763       BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0);
2764       Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS);
2765 
2766       // Create a vector of LHS op2 RHS
2767       Instruction *VL1 = cast<Instruction>(E->Scalars[1]);
2768       BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1);
2769       Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS);
2770 
2771       // Create shuffle to take alternate operations from the vector.
2772       // Also, gather up odd and even scalar ops to propagate IR flags to
2773       // each vector operation.
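      // For example, with four scalars alternating add/sub (add in the even
      // lanes), V0 holds the adds, V1 holds the subs, and the mask is
      // <0, 5, 2, 7> (an illustrative sketch).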
2774       ValueList OddScalars, EvenScalars;
2775       unsigned e = E->Scalars.size();
2776       SmallVector<Constant *, 8> Mask(e);
2777       for (unsigned i = 0; i < e; ++i) {
2778         if (i & 1) {
2779           Mask[i] = Builder.getInt32(e + i);
2780           OddScalars.push_back(E->Scalars[i]);
2781         } else {
2782           Mask[i] = Builder.getInt32(i);
2783           EvenScalars.push_back(E->Scalars[i]);
2784         }
2785       }
2786 
2787       Value *ShuffleMask = ConstantVector::get(Mask);
2788       propagateIRFlags(V0, EvenScalars);
2789       propagateIRFlags(V1, OddScalars);
2790 
2791       Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
2792       E->VectorizedValue = V;
2793       ++NumVectorInstructions;
2794       if (Instruction *I = dyn_cast<Instruction>(V))
2795         return propagateMetadata(I, E->Scalars);
2796 
2797       return V;
2798     }
    default:
      llvm_unreachable("unknown inst");
2801   }
2802   return nullptr;
2803 }
2804 
2805 Value *BoUpSLP::vectorizeTree() {
2806   ExtraValueToDebugLocsMap ExternallyUsedValues;
2807   return vectorizeTree(ExternallyUsedValues);
2808 }
2809 
2810 Value *
2811 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
2813   // All blocks must be scheduled before any instructions are inserted.
2814   for (auto &BSIter : BlocksSchedules) {
2815     scheduleBlock(BSIter.second.get());
2816   }
2817 
2818   Builder.SetInsertPoint(&F->getEntryBlock().front());
2819   auto *VectorRoot = vectorizeTree(ArrayRef<Value *>(), &VectorizableTree[0]);
2820 
2821   // If the vectorized tree can be rewritten in a smaller type, we truncate the
2822   // vectorized root. InstCombine will then rewrite the entire expression. We
2823   // sign extend the extracted values below.
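  // For example (an illustrative sketch), a root computed in i32 but needing
  // only 8 bits becomes
  //   %t = trunc <4 x i32> %root to <4 x i8>
  // and each extracted lane is later sign- or zero-extended back to i32.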
2824   auto *ScalarRoot = VectorizableTree[0].Scalars[0];
2825   if (MinBWs.count(ScalarRoot)) {
2826     if (auto *I = dyn_cast<Instruction>(VectorRoot))
2827       Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
2828     auto BundleWidth = VectorizableTree[0].Scalars.size();
2829     auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
2830     auto *VecTy = VectorType::get(MinTy, BundleWidth);
2831     auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
2832     VectorizableTree[0].VectorizedValue = Trunc;
2833   }
2834 
2835   DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values .\n");
2836 
  // If necessary, sign-extend or zero-extend the extracted value Ex back to
  // the larger type specified by ScalarType.
2839   auto extend = [&](Value *ScalarRoot, Value *Ex, Type *ScalarType) {
2840     if (!MinBWs.count(ScalarRoot))
2841       return Ex;
2842     if (MinBWs[ScalarRoot].second)
2843       return Builder.CreateSExt(Ex, ScalarType);
2844     return Builder.CreateZExt(Ex, ScalarType);
2845   };
2846 
2847   // Extract all of the elements with the external uses.
2848   for (const auto &ExternalUse : ExternalUses) {
2849     Value *Scalar = ExternalUse.Scalar;
2850     llvm::User *User = ExternalUse.User;
2851 
    // Skip users that we already RAUWed. This happens when one instruction
    // has multiple uses of the same value.
2854     if (User && !is_contained(Scalar->users(), User))
2855       continue;
2856     assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");
2857 
2858     int Idx = ScalarToTreeEntry[Scalar];
2859     TreeEntry *E = &VectorizableTree[Idx];
2860     assert(!E->NeedToGather && "Extracting from a gather list");
2861 
2862     Value *Vec = E->VectorizedValue;
2863     assert(Vec && "Can't find vectorizable value");
2864 
2865     Value *Lane = Builder.getInt32(ExternalUse.Lane);
    // If User == nullptr, the Scalar is used as an extra argument. Generate
    // an ExtractElement instruction and update the record for this scalar in
    // ExternallyUsedValues.
2869     if (!User) {
2870       assert(ExternallyUsedValues.count(Scalar) &&
2871              "Scalar with nullptr as an external user must be registered in "
2872              "ExternallyUsedValues map");
2873       if (auto *VecI = dyn_cast<Instruction>(Vec)) {
2874         Builder.SetInsertPoint(VecI->getParent(),
2875                                std::next(VecI->getIterator()));
2876       } else {
2877         Builder.SetInsertPoint(&F->getEntryBlock().front());
2878       }
2879       Value *Ex = Builder.CreateExtractElement(Vec, Lane);
2880       Ex = extend(ScalarRoot, Ex, Scalar->getType());
2881       CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
2882       auto &Locs = ExternallyUsedValues[Scalar];
2883       ExternallyUsedValues.insert({Ex, Locs});
2884       ExternallyUsedValues.erase(Scalar);
2885       continue;
2886     }
2887 
2888     // Generate extracts for out-of-tree users.
2889     // Find the insertion point for the extractelement lane.
2890     if (auto *VecI = dyn_cast<Instruction>(Vec)) {
2891       if (PHINode *PH = dyn_cast<PHINode>(User)) {
2892         for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
2893           if (PH->getIncomingValue(i) == Scalar) {
2894             TerminatorInst *IncomingTerminator =
2895                 PH->getIncomingBlock(i)->getTerminator();
2896             if (isa<CatchSwitchInst>(IncomingTerminator)) {
2897               Builder.SetInsertPoint(VecI->getParent(),
2898                                      std::next(VecI->getIterator()));
2899             } else {
2900               Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
2901             }
2902             Value *Ex = Builder.CreateExtractElement(Vec, Lane);
2903             Ex = extend(ScalarRoot, Ex, Scalar->getType());
2904             CSEBlocks.insert(PH->getIncomingBlock(i));
2905             PH->setOperand(i, Ex);
2906           }
2907         }
2908       } else {
2909         Builder.SetInsertPoint(cast<Instruction>(User));
2910         Value *Ex = Builder.CreateExtractElement(Vec, Lane);
2911         Ex = extend(ScalarRoot, Ex, Scalar->getType());
2912         CSEBlocks.insert(cast<Instruction>(User)->getParent());
2913         User->replaceUsesOfWith(Scalar, Ex);
2914      }
2915     } else {
2916       Builder.SetInsertPoint(&F->getEntryBlock().front());
2917       Value *Ex = Builder.CreateExtractElement(Vec, Lane);
2918       Ex = extend(ScalarRoot, Ex, Scalar->getType());
2919       CSEBlocks.insert(&F->getEntryBlock());
2920       User->replaceUsesOfWith(Scalar, Ex);
2921     }
2922 
2923     DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
2924   }
2925 
2926   // For each vectorized value:
2927   for (TreeEntry &EIdx : VectorizableTree) {
2928     TreeEntry *Entry = &EIdx;
2929 
2930     // For each lane:
2931     for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
2932       Value *Scalar = Entry->Scalars[Lane];
2933       // No need to handle users of gathered values.
2934       if (Entry->NeedToGather)
2935         continue;
2936 
2937       assert(Entry->VectorizedValue && "Can't find vectorizable value");
2938 
2939       Type *Ty = Scalar->getType();
2940       if (!Ty->isVoidTy()) {
2941 #ifndef NDEBUG
2942         for (User *U : Scalar->users()) {
2943           DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
2944 
2945           assert((ScalarToTreeEntry.count(U) ||
2946                   // It is legal to replace users in the ignorelist by undef.
2947                   is_contained(UserIgnoreList, U)) &&
2948                  "Replacing out-of-tree value with undef");
2949         }
2950 #endif
2951         Value *Undef = UndefValue::get(Ty);
2952         Scalar->replaceAllUsesWith(Undef);
2953       }
2954       DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
2955       eraseInstruction(cast<Instruction>(Scalar));
2956     }
2957   }
2958 
2959   Builder.ClearInsertionPoint();
2960 
2961   return VectorizableTree[0].VectorizedValue;
2962 }
2963 
2964 void BoUpSLP::optimizeGatherSequence() {
2965   DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
2966         << " gather sequences instructions.\n");
2967   // LICM InsertElementInst sequences.
2968   for (Instruction *it : GatherSeq) {
2969     InsertElementInst *Insert = dyn_cast<InsertElementInst>(it);
2970 
2971     if (!Insert)
2972       continue;
2973 
2974     // Check if this block is inside a loop.
2975     Loop *L = LI->getLoopFor(Insert->getParent());
2976     if (!L)
2977       continue;
2978 
2979     // Check if it has a preheader.
2980     BasicBlock *PreHeader = L->getLoopPreheader();
2981     if (!PreHeader)
2982       continue;
2983 
    // If the vector or the element that we insert into it are
    // instructions that are defined inside the loop, then we can't
    // hoist this instruction.
2987     Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
2988     Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
2989     if (CurrVec && L->contains(CurrVec))
2990       continue;
2991     if (NewElem && L->contains(NewElem))
2992       continue;
2993 
2994     // We can hoist this instruction. Move it to the pre-header.
2995     Insert->moveBefore(PreHeader->getTerminator());
2996   }
2997 
2998   // Make a list of all reachable blocks in our CSE queue.
2999   SmallVector<const DomTreeNode *, 8> CSEWorkList;
3000   CSEWorkList.reserve(CSEBlocks.size());
3001   for (BasicBlock *BB : CSEBlocks)
3002     if (DomTreeNode *N = DT->getNode(BB)) {
3003       assert(DT->isReachableFromEntry(N));
3004       CSEWorkList.push_back(N);
3005     }
3006 
3007   // Sort blocks by domination. This ensures we visit a block after all blocks
3008   // dominating it are visited.
3009   std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(),
3010                    [this](const DomTreeNode *A, const DomTreeNode *B) {
3011     return DT->properlyDominates(A, B);
3012   });
3013 
3014   // Perform O(N^2) search over the gather sequences and merge identical
3015   // instructions. TODO: We can further optimize this scan if we split the
3016   // instructions into different buckets based on the insert lane.
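  // For example, two identical
  //   %v = insertelement <2 x i32> undef, i32 %a, i32 0
  // instructions, where one's block dominates the other's, are merged into
  // one (an illustrative sketch).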
3017   SmallVector<Instruction *, 16> Visited;
3018   for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
3019     assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
3020            "Worklist not sorted properly!");
3021     BasicBlock *BB = (*I)->getBlock();
3022     // For all instructions in blocks containing gather sequences:
3023     for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
3024       Instruction *In = &*it++;
3025       if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
3026         continue;
3027 
3028       // Check if we can replace this instruction with any of the
3029       // visited instructions.
3030       for (Instruction *v : Visited) {
3031         if (In->isIdenticalTo(v) &&
3032             DT->dominates(v->getParent(), In->getParent())) {
3033           In->replaceAllUsesWith(v);
3034           eraseInstruction(In);
3035           In = nullptr;
3036           break;
3037         }
3038       }
3039       if (In) {
3040         assert(!is_contained(Visited, In));
3041         Visited.push_back(In);
3042       }
3043     }
3044   }
3045   CSEBlocks.clear();
3046   GatherSeq.clear();
3047 }
3048 
// Groups the instructions into a bundle (which is then a single scheduling
// entity) and schedules instructions until the bundle gets ready.
3051 bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL,
3052                                                  BoUpSLP *SLP) {
3053   if (isa<PHINode>(VL[0]))
3054     return true;
3055 
3056   // Initialize the instruction bundle.
3057   Instruction *OldScheduleEnd = ScheduleEnd;
3058   ScheduleData *PrevInBundle = nullptr;
3059   ScheduleData *Bundle = nullptr;
3060   bool ReSchedule = false;
3061   DEBUG(dbgs() << "SLP:  bundle: " << *VL[0] << "\n");
3062 
3063   // Make sure that the scheduling region contains all
3064   // instructions of the bundle.
3065   for (Value *V : VL) {
3066     if (!extendSchedulingRegion(V))
3067       return false;
3068   }
3069 
3070   for (Value *V : VL) {
3071     ScheduleData *BundleMember = getScheduleData(V);
3072     assert(BundleMember &&
3073            "no ScheduleData for bundle member (maybe not in same basic block)");
3074     if (BundleMember->IsScheduled) {
      // A bundle member was scheduled as a single instruction before and now
      // needs to be scheduled as part of the bundle. We just get rid of the
      // existing schedule.
3078       DEBUG(dbgs() << "SLP:  reset schedule because " << *BundleMember
3079                    << " was already scheduled\n");
3080       ReSchedule = true;
3081     }
3082     assert(BundleMember->isSchedulingEntity() &&
3083            "bundle member already part of other bundle");
3084     if (PrevInBundle) {
3085       PrevInBundle->NextInBundle = BundleMember;
3086     } else {
3087       Bundle = BundleMember;
3088     }
3089     BundleMember->UnscheduledDepsInBundle = 0;
3090     Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;
3091 
3092     // Group the instructions to a bundle.
3093     BundleMember->FirstInBundle = Bundle;
3094     PrevInBundle = BundleMember;
3095   }
3096   if (ScheduleEnd != OldScheduleEnd) {
3097     // The scheduling region got new instructions at the lower end (or it is a
3098     // new region for the first bundle). This makes it necessary to
3099     // recalculate all dependencies.
    // This seldom needs to be done a second time after adding the initial
    // bundle to the region.
3102     for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
3103       ScheduleData *SD = getScheduleData(I);
3104       SD->clearDependencies();
3105     }
3106     ReSchedule = true;
3107   }
3108   if (ReSchedule) {
3109     resetSchedule();
3110     initialFillReadyList(ReadyInsts);
3111   }
3112 
3113   DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
3114                << BB->getName() << "\n");
3115 
3116   calculateDependencies(Bundle, true, SLP);
3117 
  // Now try to schedule the new bundle. As soon as the bundle is "ready", it
  // means that there are no cyclic dependencies and we can schedule it. Note
  // that it's important that we don't "schedule" the bundle yet (see
  // cancelScheduling).
3122   while (!Bundle->isReady() && !ReadyInsts.empty()) {
3123 
3124     ScheduleData *pickedSD = ReadyInsts.back();
3125     ReadyInsts.pop_back();
3126 
3127     if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) {
3128       schedule(pickedSD, ReadyInsts);
3129     }
3130   }
3131   if (!Bundle->isReady()) {
3132     cancelScheduling(VL);
3133     return false;
3134   }
3135   return true;
3136 }
3137 
3138 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL) {
3139   if (isa<PHINode>(VL[0]))
3140     return;
3141 
3142   ScheduleData *Bundle = getScheduleData(VL[0]);
3143   DEBUG(dbgs() << "SLP:  cancel scheduling of " << *Bundle << "\n");
3144   assert(!Bundle->IsScheduled &&
3145          "Can't cancel bundle which is already scheduled");
3146   assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
3147          "tried to unbundle something which is not a bundle");
3148 
3149   // Un-bundle: make single instructions out of the bundle.
3150   ScheduleData *BundleMember = Bundle;
3151   while (BundleMember) {
3152     assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
3153     BundleMember->FirstInBundle = BundleMember;
3154     ScheduleData *Next = BundleMember->NextInBundle;
3155     BundleMember->NextInBundle = nullptr;
3156     BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
3157     if (BundleMember->UnscheduledDepsInBundle == 0) {
3158       ReadyInsts.insert(BundleMember);
3159     }
3160     BundleMember = Next;
3161   }
3162 }
3163 
3164 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V) {
3165   if (getScheduleData(V))
3166     return true;
3167   Instruction *I = dyn_cast<Instruction>(V);
3168   assert(I && "bundle member must be an instruction");
3169   assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
3170   if (!ScheduleStart) {
3171     // It's the first instruction in the new region.
3172     initScheduleData(I, I->getNextNode(), nullptr, nullptr);
3173     ScheduleStart = I;
3174     ScheduleEnd = I->getNextNode();
3175     assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
3176     DEBUG(dbgs() << "SLP:  initialize schedule region to " << *I << "\n");
3177     return true;
3178   }
3179   // Search up and down at the same time, because we don't know if the new
3180   // instruction is above or below the existing scheduling region.
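  // For example, if the region is currently [ScheduleStart, ScheduleEnd) and
  // I lies a few instructions above ScheduleStart, the upward iterator reaches
  // I first and the region start is moved to I (an illustrative sketch).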
3181   BasicBlock::reverse_iterator UpIter =
3182       ++ScheduleStart->getIterator().getReverse();
3183   BasicBlock::reverse_iterator UpperEnd = BB->rend();
3184   BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
3185   BasicBlock::iterator LowerEnd = BB->end();
3186   for (;;) {
3187     if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
3188       DEBUG(dbgs() << "SLP:  exceeded schedule region size limit\n");
3189       return false;
3190     }
3191 
3192     if (UpIter != UpperEnd) {
3193       if (&*UpIter == I) {
3194         initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
3195         ScheduleStart = I;
3196         DEBUG(dbgs() << "SLP:  extend schedule region start to " << *I << "\n");
3197         return true;
3198       }
      ++UpIter;
3200     }
3201     if (DownIter != LowerEnd) {
3202       if (&*DownIter == I) {
3203         initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
3204                          nullptr);
3205         ScheduleEnd = I->getNextNode();
3206         assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
3207         DEBUG(dbgs() << "SLP:  extend schedule region end to " << *I << "\n");
3208         return true;
3209       }
      ++DownIter;
3211     }
3212     assert((UpIter != UpperEnd || DownIter != LowerEnd) &&
3213            "instruction not found in block");
3214   }
3215   return true;
3216 }
3217 
3218 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
3219                                                 Instruction *ToI,
3220                                                 ScheduleData *PrevLoadStore,
3221                                                 ScheduleData *NextLoadStore) {
3222   ScheduleData *CurrentLoadStore = PrevLoadStore;
3223   for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
3224     ScheduleData *SD = ScheduleDataMap[I];
3225     if (!SD) {
3226       // Allocate a new ScheduleData for the instruction.
3227       if (ChunkPos >= ChunkSize) {
3228         ScheduleDataChunks.push_back(
3229             llvm::make_unique<ScheduleData[]>(ChunkSize));
3230         ChunkPos = 0;
3231       }
3232       SD = &(ScheduleDataChunks.back()[ChunkPos++]);
3233       ScheduleDataMap[I] = SD;
3234       SD->Inst = I;
3235     }
3236     assert(!isInSchedulingRegion(SD) &&
3237            "new ScheduleData already in scheduling region");
3238     SD->init(SchedulingRegionID);
3239 
3240     if (I->mayReadOrWriteMemory()) {
3241       // Update the linked list of memory accessing instructions.
3242       if (CurrentLoadStore) {
3243         CurrentLoadStore->NextLoadStore = SD;
3244       } else {
3245         FirstLoadStoreInRegion = SD;
3246       }
3247       CurrentLoadStore = SD;
3248     }
3249   }
3250   if (NextLoadStore) {
3251     if (CurrentLoadStore)
3252       CurrentLoadStore->NextLoadStore = NextLoadStore;
3253   } else {
3254     LastLoadStoreInRegion = CurrentLoadStore;
3255   }
3256 }
3257 
3258 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
3259                                                      bool InsertInReadyList,
3260                                                      BoUpSLP *SLP) {
3261   assert(SD->isSchedulingEntity());
3262 
3263   SmallVector<ScheduleData *, 10> WorkList;
3264   WorkList.push_back(SD);
3265 
3266   while (!WorkList.empty()) {
3267     ScheduleData *SD = WorkList.back();
3268     WorkList.pop_back();
3269 
3270     ScheduleData *BundleMember = SD;
3271     while (BundleMember) {
3272       assert(isInSchedulingRegion(BundleMember));
3273       if (!BundleMember->hasValidDependencies()) {
3274 
3275         DEBUG(dbgs() << "SLP:       update deps of " << *BundleMember << "\n");
3276         BundleMember->Dependencies = 0;
3277         BundleMember->resetUnscheduledDeps();
3278 
3279         // Handle def-use chain dependencies.
3280         for (User *U : BundleMember->Inst->users()) {
3281           if (isa<Instruction>(U)) {
3282             ScheduleData *UseSD = getScheduleData(U);
3283             if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
3284               BundleMember->Dependencies++;
3285               ScheduleData *DestBundle = UseSD->FirstInBundle;
3286               if (!DestBundle->IsScheduled) {
3287                 BundleMember->incrementUnscheduledDeps(1);
3288               }
3289               if (!DestBundle->hasValidDependencies()) {
3290                 WorkList.push_back(DestBundle);
3291               }
3292             }
3293           } else {
            // I'm not sure if this can ever happen. But we need to be safe.
            // This keeps the instruction/bundle from ever being scheduled,
            // which eventually disables vectorization.
3297             BundleMember->Dependencies++;
3298             BundleMember->incrementUnscheduledDeps(1);
3299           }
3300         }
3301 
3302         // Handle the memory dependencies.
3303         ScheduleData *DepDest = BundleMember->NextLoadStore;
3304         if (DepDest) {
3305           Instruction *SrcInst = BundleMember->Inst;
3306           MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
3307           bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
3308           unsigned numAliased = 0;
3309           unsigned DistToSrc = 1;
3310 
3311           while (DepDest) {
3312             assert(isInSchedulingRegion(DepDest));
3313 
3314             // We have two limits to reduce the complexity:
3315             // 1) AliasedCheckLimit: It's a small limit to reduce calls to
3316             //    SLP->isAliased (which is the expensive part in this loop).
3317             // 2) MaxMemDepDistance: It's for very large blocks and it aborts
3318             //    the whole loop (even if the loop is fast, it's quadratic).
3319             //    It's important for the loop break condition (see below) to
3320             //    check this limit even between two read-only instructions.
3321             if (DistToSrc >= MaxMemDepDistance ||
3322                     ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
3323                      (numAliased >= AliasedCheckLimit ||
3324                       SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
3325 
3326               // We increment the counter only if the locations are aliased
3327               // (instead of counting all alias checks). This gives a better
3328               // balance between reduced runtime and accurate dependencies.
3329               numAliased++;
3330 
3331               DepDest->MemoryDependencies.push_back(BundleMember);
3332               BundleMember->Dependencies++;
3333               ScheduleData *DestBundle = DepDest->FirstInBundle;
3334               if (!DestBundle->IsScheduled) {
3335                 BundleMember->incrementUnscheduledDeps(1);
3336               }
3337               if (!DestBundle->hasValidDependencies()) {
3338                 WorkList.push_back(DestBundle);
3339               }
3340             }
3341             DepDest = DepDest->NextLoadStore;
3342 
            // An example explaining the loop break condition: Let's assume
            // our starting instruction is i0 and MaxMemDepDistance = 3.
3345             //
3346             //                      +--------v--v--v
3347             //             i0,i1,i2,i3,i4,i5,i6,i7,i8
3348             //             +--------^--^--^
3349             //
            // MaxMemDepDistance lets us stop alias-checking at i3 and we add
3351             // dependencies from i0 to i3,i4,.. (even if they are not aliased).
3352             // Previously we already added dependencies from i3 to i6,i7,i8
3353             // (because of MaxMemDepDistance). As we added a dependency from
3354             // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
3355             // and we can abort this loop at i6.
3356             if (DistToSrc >= 2 * MaxMemDepDistance)
3357                 break;
3358             DistToSrc++;
3359           }
3360         }
3361       }
3362       BundleMember = BundleMember->NextInBundle;
3363     }
3364     if (InsertInReadyList && SD->isReady()) {
3365       ReadyInsts.push_back(SD);
3366       DEBUG(dbgs() << "SLP:     gets ready on update: " << *SD->Inst << "\n");
3367     }
3368   }
3369 }
3370 
3371 void BoUpSLP::BlockScheduling::resetSchedule() {
3372   assert(ScheduleStart &&
3373          "tried to reset schedule on block which has not been scheduled");
3374   for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
3375     ScheduleData *SD = getScheduleData(I);
3376     assert(isInSchedulingRegion(SD));
3377     SD->IsScheduled = false;
3378     SD->resetUnscheduledDeps();
3379   }
3380   ReadyInsts.clear();
3381 }
3382 
3383 void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
3385   if (!BS->ScheduleStart)
3386     return;
3387 
3388   DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
3389 
3390   BS->resetSchedule();
3391 
3392   // For the real scheduling we use a more sophisticated ready-list: it is
3393   // sorted by the original instruction location. This lets the final schedule
  // be as close as possible to the original instruction order.
3395   struct ScheduleDataCompare {
3396     bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
3397       return SD2->SchedulingPriority < SD1->SchedulingPriority;
3398     }
3399   };
3400   std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
3401 
3402   // Ensure that all dependency data is updated and fill the ready-list with
3403   // initial instructions.
3404   int Idx = 0;
3405   int NumToSchedule = 0;
3406   for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
3407        I = I->getNextNode()) {
3408     ScheduleData *SD = BS->getScheduleData(I);
3409     assert(
3410         SD->isPartOfBundle() == (ScalarToTreeEntry.count(SD->Inst) != 0) &&
3411         "scheduler and vectorizer have different opinion on what is a bundle");
3412     SD->FirstInBundle->SchedulingPriority = Idx++;
3413     if (SD->isSchedulingEntity()) {
3414       BS->calculateDependencies(SD, false, this);
3415       NumToSchedule++;
3416     }
3417   }
3418   BS->initialFillReadyList(ReadyInsts);
3419 
3420   Instruction *LastScheduledInst = BS->ScheduleEnd;
3421 
3422   // Do the "real" scheduling.
3423   while (!ReadyInsts.empty()) {
3424     ScheduleData *picked = *ReadyInsts.begin();
3425     ReadyInsts.erase(ReadyInsts.begin());
3426 
3427     // Move the scheduled instruction(s) to their dedicated places, if not
3428     // there yet.
3429     ScheduleData *BundleMember = picked;
3430     while (BundleMember) {
3431       Instruction *pickedInst = BundleMember->Inst;
3432       if (LastScheduledInst->getNextNode() != pickedInst) {
3433         BS->BB->getInstList().remove(pickedInst);
3434         BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
3435                                      pickedInst);
3436       }
3437       LastScheduledInst = pickedInst;
3438       BundleMember = BundleMember->NextInBundle;
3439     }
3440 
3441     BS->schedule(picked, ReadyInsts);
3442     NumToSchedule--;
3443   }
3444   assert(NumToSchedule == 0 && "could not schedule all instructions");
3445 
3446   // Avoid duplicate scheduling of the block.
3447   BS->ScheduleStart = nullptr;
3448 }
3449 
3450 unsigned BoUpSLP::getVectorElementSize(Value *V) {
3451   // If V is a store, just return the width of the stored value without
3452   // traversing the expression tree. This is the common case.
3453   if (auto *Store = dyn_cast<StoreInst>(V))
3454     return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
3455 
3456   // If V is not a store, we can traverse the expression tree to find loads
3457   // that feed it. The type of the loaded value may indicate a more suitable
3458   // width than V's type. We want to base the vector element size on the width
3459   // of memory operations where possible.
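  // For example (an illustrative case), in
  //   %x = load i8, i8* %p
  //   %y = zext i8 %x to i32
  // the element size for %y is the 8 bits of the load, not the 32 bits of its
  // own type.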
3460   SmallVector<Instruction *, 16> Worklist;
3461   SmallPtrSet<Instruction *, 16> Visited;
3462   if (auto *I = dyn_cast<Instruction>(V))
3463     Worklist.push_back(I);
3464 
3465   // Traverse the expression tree in bottom-up order looking for loads. If we
  // encounter an instruction we don't yet handle, we give up.
3467   auto MaxWidth = 0u;
3468   auto FoundUnknownInst = false;
3469   while (!Worklist.empty() && !FoundUnknownInst) {
3470     auto *I = Worklist.pop_back_val();
3471     Visited.insert(I);
3472 
3473     // We should only be looking at scalar instructions here. If the current
3474     // instruction has a vector type, give up.
3475     auto *Ty = I->getType();
3476     if (isa<VectorType>(Ty))
3477       FoundUnknownInst = true;
3478 
3479     // If the current instruction is a load, update MaxWidth to reflect the
3480     // width of the loaded value.
3481     else if (isa<LoadInst>(I))
3482       MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty));
3483 
3484     // Otherwise, we need to visit the operands of the instruction. We only
3485     // handle the interesting cases from buildTree here. If an operand is an
3486     // instruction we haven't yet visited, we add it to the worklist.
3487     else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
3488              isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) {
3489       for (Use &U : I->operands())
3490         if (auto *J = dyn_cast<Instruction>(U.get()))
3491           if (!Visited.count(J))
3492             Worklist.push_back(J);
3493     }
3494 
3495     // If we don't yet handle the instruction, give up.
3496     else
3497       FoundUnknownInst = true;
3498   }
3499 
3500   // If we didn't encounter a memory access in the expression tree, or if we
3501   // gave up for some reason, just return the width of V.
3502   if (!MaxWidth || FoundUnknownInst)
3503     return DL->getTypeSizeInBits(V->getType());
3504 
3505   // Otherwise, return the maximum width we found.
3506   return MaxWidth;
3507 }
3508 
3509 // Determine if a value V in a vectorizable expression Expr can be demoted to a
3510 // smaller type with a truncation. We collect the values that will be demoted
3511 // in ToDemote and additional roots that require investigating in Roots.
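// For example, in (sext (add (trunc %x), %c)), both the add and the trunc can
// be demoted, and %x is recorded in Roots for further investigation (an
// illustrative sketch).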
3512 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
3513                                   SmallVectorImpl<Value *> &ToDemote,
3514                                   SmallVectorImpl<Value *> &Roots) {
3516   // We can always demote constants.
3517   if (isa<Constant>(V)) {
3518     ToDemote.push_back(V);
3519     return true;
3520   }
3521 
3522   // If the value is not an instruction in the expression with only one use, it
3523   // cannot be demoted.
3524   auto *I = dyn_cast<Instruction>(V);
3525   if (!I || !I->hasOneUse() || !Expr.count(I))
3526     return false;
3527 
3528   switch (I->getOpcode()) {
3529 
3530   // We can always demote truncations and extensions. Since truncations can
3531   // seed additional demotion, we save the truncated value.
  case Instruction::Trunc:
    Roots.push_back(I->getOperand(0));
    LLVM_FALLTHROUGH;
3534   case Instruction::ZExt:
3535   case Instruction::SExt:
3536     break;
3537 
3538   // We can demote certain binary operations if we can demote both of their
3539   // operands.
3540   case Instruction::Add:
3541   case Instruction::Sub:
3542   case Instruction::Mul:
3543   case Instruction::And:
3544   case Instruction::Or:
3545   case Instruction::Xor:
3546     if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
3547         !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
3548       return false;
3549     break;
3550 
3551   // We can demote selects if we can demote their true and false values.
3552   case Instruction::Select: {
3553     SelectInst *SI = cast<SelectInst>(I);
3554     if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
3555         !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
3556       return false;
3557     break;
3558   }
3559 
3560   // We can demote phis if we can demote all their incoming operands. Note that
3561   // we don't need to worry about cycles since we ensure single use above.
3562   case Instruction::PHI: {
3563     PHINode *PN = cast<PHINode>(I);
3564     for (Value *IncValue : PN->incoming_values())
3565       if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
3566         return false;
3567     break;
3568   }
3569 
3570   // Otherwise, conservatively give up.
3571   default:
3572     return false;
3573   }
3574 
3575   // Record the value that we can demote.
3576   ToDemote.push_back(V);
3577   return true;
3578 }
3579 
3580 void BoUpSLP::computeMinimumValueSizes() {
3581   // If there are no external uses, the expression tree must be rooted by a
3582   // store. We can't demote in-memory values, so there is nothing to do here.
3583   if (ExternalUses.empty())
3584     return;
3585 
3586   // We only attempt to truncate integer expressions.
3587   auto &TreeRoot = VectorizableTree[0].Scalars;
3588   auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
3589   if (!TreeRootIT)
3590     return;
3591 
3592   // If the expression is not rooted by a store, these roots should have
3593   // external uses. We will rely on InstCombine to rewrite the expression in
3594   // the narrower type. However, InstCombine only rewrites single-use values.
3595   // This means that if a tree entry other than a root is used externally, it
3596   // must have multiple uses and InstCombine will not rewrite it. The code
3597   // below ensures that only the roots are used externally.
3598   SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
3599   for (auto &EU : ExternalUses)
3600     if (!Expr.erase(EU.Scalar))
3601       return;
3602   if (!Expr.empty())
3603     return;
3604 
3605   // Collect the scalar values of the vectorizable expression. We will use this
3606   // context to determine which values can be demoted. If we see a truncation,
3607   // we mark it as seeding another demotion.
3608   for (auto &Entry : VectorizableTree)
3609     Expr.insert(Entry.Scalars.begin(), Entry.Scalars.end());
3610 
3611   // Ensure the roots of the vectorizable tree don't form a cycle. They must
3612   // have a single external user that is not in the vectorizable tree.
3613   for (auto *Root : TreeRoot)
3614     if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
3615       return;
3616 
3617   // Conservatively determine if we can actually truncate the roots of the
3618   // expression. Collect the values that can be demoted in ToDemote and
3619   // additional roots that require investigating in Roots.
3620   SmallVector<Value *, 32> ToDemote;
3621   SmallVector<Value *, 4> Roots;
3622   for (auto *Root : TreeRoot)
3623     if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
3624       return;
3625 
3626   // The maximum bit width required to represent all the values that can be
3627   // demoted without loss of precision. It would be safe to truncate the roots
3628   // of the expression to this width.
3629   auto MaxBitWidth = 8u;
3630 
3631   // We first check if all the bits of the roots are demanded. If they're not,
3632   // we can truncate the roots to this narrower type.
3633   for (auto *Root : TreeRoot) {
3634     auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
3635     MaxBitWidth = std::max<unsigned>(
3636         Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
3637   }
3638 
3639   // True if the roots can be zero-extended back to their original type, rather
3640   // than sign-extended. We know that if the leading bits are not demanded, we
3641   // can safely zero-extend. So we initialize IsKnownPositive to True.
3642   bool IsKnownPositive = true;
3643 
3644   // If all the bits of the roots are demanded, we can try a little harder to
3645   // compute a narrower type. This can happen, for example, if the roots are
3646   // getelementptr indices. InstCombine promotes these indices to the pointer
3647   // width. Thus, all their bits are technically demanded even though the
3648   // address computation might be vectorized in a smaller type.
3649   //
3650   // We start by looking at each entry that can be demoted. We compute the
3651   // maximum bit width required to store the scalar by using ValueTracking to
3652   // compute the number of high-order bits we can truncate.
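  // For example, a GEP index computed as (zext i8 %i to i64) demands all 64
  // bits after promotion, yet ValueTracking proves many leading sign bits, so
  // the computation can be narrowed back to 8 bits (an illustrative case).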
3653   if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType())) {
3654     MaxBitWidth = 8u;
3655 
3656     // Determine if the sign bit of all the roots is known to be zero. If not,
3657     // IsKnownPositive is set to False.
3658     IsKnownPositive = all_of(TreeRoot, [&](Value *R) {
3659       bool KnownZero = false;
3660       bool KnownOne = false;
3661       ComputeSignBit(R, KnownZero, KnownOne, *DL);
3662       return KnownZero;
3663     });
3664 
3665     // Determine the maximum number of bits required to store the scalar
3666     // values.
3667     for (auto *Scalar : ToDemote) {
3668       auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, 0, DT);
3669       auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
3670       MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
3671     }
3672 
3673     // If we can't prove that the sign bit is zero, we must add one to the
3674     // maximum bit width to account for the unknown sign bit. This preserves
3675     // the existing sign bit so we can safely sign-extend the root back to the
3676     // original type. Otherwise, if we know the sign bit is zero, we will
3677     // zero-extend the root instead.
3678     //
3679     // FIXME: This is somewhat suboptimal, as there will be cases where adding
3680     //        one to the maximum bit width will yield a larger-than-necessary
3681     //        type. In general, we need to add an extra bit only if we can't
3682     //        prove that the upper bit of the original type is equal to the
3683     //        upper bit of the proposed smaller type. If these two bits are the
3684     //        same (either zero or one) we know that sign-extending from the
3685     //        smaller type will result in the same value. Here, since we can't
3686     //        yet prove this, we are just making the proposed smaller type
3687     //        larger to ensure correctness.
3688     if (!IsKnownPositive)
3689       ++MaxBitWidth;
3690   }
3691 
3692   // Round MaxBitWidth up to the next power-of-two.
3693   if (!isPowerOf2_64(MaxBitWidth))
3694     MaxBitWidth = NextPowerOf2(MaxBitWidth);
3695 
  // If the maximum bit width we compute is less than the width of the roots'
  // type, we can proceed with the narrowing. Otherwise, do nothing.
3698   if (MaxBitWidth >= TreeRootIT->getBitWidth())
3699     return;
3700 
3701   // If we can truncate the root, we must collect additional values that might
3702   // be demoted as a result. That is, those seeded by truncations we will
3703   // modify.
3704   while (!Roots.empty())
3705     collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
3706 
  // Finally, map the values we can demote to the maximum bit width we
  // computed.
3708   for (auto *Scalar : ToDemote)
3709     MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
3710 }
3711 
3712 namespace {
3713 /// The SLPVectorizer Pass.
3714 struct SLPVectorizer : public FunctionPass {
3715   SLPVectorizerPass Impl;
3716 
3717   /// Pass identification, replacement for typeid
3718   static char ID;
3719 
3720   explicit SLPVectorizer() : FunctionPass(ID) {
3721     initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
3722   }
3725   bool doInitialization(Module &M) override {
3726     return false;
3727   }
3728 
3729   bool runOnFunction(Function &F) override {
3730     if (skipFunction(F))
3731       return false;
3732 
3733     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
3734     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
3735     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
3736     auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
3737     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
3738     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
3739     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
3740     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
3741     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
3742 
3743     return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB);
3744   }
3745 
3746   void getAnalysisUsage(AnalysisUsage &AU) const override {
3747     FunctionPass::getAnalysisUsage(AU);
3748     AU.addRequired<AssumptionCacheTracker>();
3749     AU.addRequired<ScalarEvolutionWrapperPass>();
3750     AU.addRequired<AAResultsWrapperPass>();
3751     AU.addRequired<TargetTransformInfoWrapperPass>();
3752     AU.addRequired<LoopInfoWrapperPass>();
3753     AU.addRequired<DominatorTreeWrapperPass>();
3754     AU.addRequired<DemandedBitsWrapperPass>();
3755     AU.addPreserved<LoopInfoWrapperPass>();
3756     AU.addPreserved<DominatorTreeWrapperPass>();
3757     AU.addPreserved<AAResultsWrapperPass>();
3758     AU.addPreserved<GlobalsAAWrapperPass>();
3759     AU.setPreservesCFG();
3760   }
3761 };
3762 } // end anonymous namespace
3763 
PreservedAnalyses SLPVectorizerPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
3765   auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
3766   auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
3767   auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
3768   auto *AA = &AM.getResult<AAManager>(F);
3769   auto *LI = &AM.getResult<LoopAnalysis>(F);
3770   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
3771   auto *AC = &AM.getResult<AssumptionAnalysis>(F);
3772   auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
3773 
3774   bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB);
3775   if (!Changed)
3776     return PreservedAnalyses::all();
3777 
3778   PreservedAnalyses PA;
3779   PA.preserveSet<CFGAnalyses>();
3780   PA.preserve<AAManager>();
3781   PA.preserve<GlobalsAA>();
3782   return PA;
3783 }
3784 
3785 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
3786                                 TargetTransformInfo *TTI_,
3787                                 TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
3788                                 LoopInfo *LI_, DominatorTree *DT_,
3789                                 AssumptionCache *AC_, DemandedBits *DB_) {
3790   SE = SE_;
3791   TTI = TTI_;
3792   TLI = TLI_;
3793   AA = AA_;
3794   LI = LI_;
3795   DT = DT_;
3796   AC = AC_;
3797   DB = DB_;
3798   DL = &F.getParent()->getDataLayout();
3799 
3800   Stores.clear();
3801   GEPs.clear();
3802   bool Changed = false;
3803 
3804   // If the target claims to have no vector registers don't attempt
3805   // vectorization.
3806   if (!TTI->getNumberOfRegisters(true))
3807     return false;
3808 
3809   // Don't vectorize when the attribute NoImplicitFloat is used.
3810   if (F.hasFnAttribute(Attribute::NoImplicitFloat))
3811     return false;
3812 
3813   DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
3814 
3815   // Use the bottom up slp vectorizer to construct chains that start with
3816   // store instructions.
3817   BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL);
3818 
3819   // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
3820   // delete instructions.
3821 
3822   // Scan the blocks in the function in post order.
3823   for (auto BB : post_order(&F.getEntryBlock())) {
3824     collectSeedInstructions(BB);
3825 
3826     // Vectorize trees that end at stores.
3827     if (!Stores.empty()) {
3828       DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
3829                    << " underlying objects.\n");
3830       Changed |= vectorizeStoreChains(R);
3831     }
3832 
3833     // Vectorize trees that end at reductions.
3834     Changed |= vectorizeChainsInBlock(BB, R);
3835 
3836     // Vectorize the index computations of getelementptr instructions. This
3837     // is primarily intended to catch gather-like idioms ending at
3838     // non-consecutive loads.
3839     if (!GEPs.empty()) {
3840       DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
3841                    << " underlying objects.\n");
3842       Changed |= vectorizeGEPIndices(BB, R);
3843     }
3844   }
3845 
3846   if (Changed) {
3847     R.optimizeGatherSequence();
3848     DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
3849     DEBUG(verifyFunction(F));
3850   }
3851   return Changed;
3852 }
3853 
/// \brief Check that the Values in the slice of the VL array still exist in
/// the WeakVH array.
3856 /// Vectorization of part of the VL array may cause later values in the VL array
3857 /// to become invalid. We track when this has happened in the WeakVH array.
3858 static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, ArrayRef<WeakVH> VH,
3859                                unsigned SliceBegin, unsigned SliceSize) {
3860   VL = VL.slice(SliceBegin, SliceSize);
3861   VH = VH.slice(SliceBegin, SliceSize);
3862   return !std::equal(VL.begin(), VL.end(), VH.begin());
3863 }
3864 
3865 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
3866                                             unsigned VecRegSize) {
3867   unsigned ChainLen = Chain.size();
3868   DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
3869         << "\n");
3870   unsigned Sz = R.getVectorElementSize(Chain[0]);
3871   unsigned VF = VecRegSize / Sz;
3872 
3873   if (!isPowerOf2_32(Sz) || VF < 2)
3874     return false;
3875 
3876   // Keep track of values that were deleted by vectorizing in the loop below.
3877   SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());
3878 
3879   bool Changed = false;
3880   // Look for profitable vectorizable trees at all offsets, starting at zero.
3881   for (unsigned i = 0, e = ChainLen; i < e; ++i) {
3882     if (i + VF > e)
3883       break;
3884 
3885     // Check that a previous iteration of this loop did not delete the Value.
3886     if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
3887       continue;
3888 
3889     DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
3890           << "\n");
3891     ArrayRef<Value *> Operands = Chain.slice(i, VF);
3892 
3893     R.buildTree(Operands);
3894     if (R.isTreeTinyAndNotFullyVectorizable())
3895       continue;
3896 
3897     R.computeMinimumValueSizes();
3898 
3899     int Cost = R.getTreeCost();
3900 
3901     DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
3902     if (Cost < -SLPCostThreshold) {
3903       DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
3904       R.vectorizeTree();
3905 
3906       // Move to the next bundle.
3907       i += VF - 1;
3908       Changed = true;
3909     }
3910   }
3911 
3912   return Changed;
3913 }
3914 
3915 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
3916                                         BoUpSLP &R) {
3917   SetVector<StoreInst *> Heads, Tails;
3918   SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
3919 
3920   // We may run into multiple chains that merge into a single chain. We mark the
3921   // stores that we vectorized so that we don't visit the same store twice.
3922   BoUpSLP::ValueSet VectorizedStores;
3923   bool Changed = false;
3924 
3925   // Do a quadratic search on all of the given stores and find
3926   // all of the pairs of stores that follow each other.
3927   SmallVector<unsigned, 16> IndexQueue;
3928   for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
3929     IndexQueue.clear();
    // If a store has multiple consecutive store candidates, search the Stores
    // array according to the sequence: from i+1 to e, then from i-1 to 0.
    // This is because pairing with an immediately succeeding or preceding
    // candidate usually creates the best chance to find an SLP vectorization
    // opportunity.
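    // For example, with i = 5 and e = 8, the candidates are visited in the
    // order 6, 7, 4, 3, 2, 1, 0 (an illustrative sketch).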
3934     unsigned j = 0;
3935     for (j = i + 1; j < e; ++j)
3936       IndexQueue.push_back(j);
3937     for (j = i; j > 0; --j)
3938       IndexQueue.push_back(j - 1);
3939 
3940     for (auto &k : IndexQueue) {
3941       if (isConsecutiveAccess(Stores[i], Stores[k], *DL, *SE)) {
3942         Tails.insert(Stores[k]);
3943         Heads.insert(Stores[i]);
3944         ConsecutiveChain[Stores[i]] = Stores[k];
3945         break;
3946       }
3947     }
3948   }
3949 
3950   // For stores that start but don't end a link in the chain:
3951   for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
3952        it != e; ++it) {
3953     if (Tails.count(*it))
3954       continue;
3955 
3956     // We found a store instr that starts a chain. Now follow the chain and try
3957     // to vectorize it.
3958     BoUpSLP::ValueList Operands;
3959     StoreInst *I = *it;
3960     // Collect the chain into a list.
3961     while (Tails.count(I) || Heads.count(I)) {
3962       if (VectorizedStores.count(I))
3963         break;
3964       Operands.push_back(I);
3965       // Move to the next value in the chain.
3966       I = ConsecutiveChain[I];
3967     }
3968 
3969     // FIXME: Is division-by-2 the correct step? Should we assert that the
3970     // register size is a power-of-2?
3971     for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize();
3972          Size /= 2) {
3973       if (vectorizeStoreChain(Operands, R, Size)) {
3974         // Mark the vectorized stores so that we don't vectorize them again.
3975         VectorizedStores.insert(Operands.begin(), Operands.end());
3976         Changed = true;
3977         break;
3978       }
3979     }
3980   }
3981 
3982   return Changed;
3983 }
3984 
3985 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
3987   // Initialize the collections. We will make a single pass over the block.
3988   Stores.clear();
3989   GEPs.clear();
3990 
3991   // Visit the store and getelementptr instructions in BB and organize them in
3992   // Stores and GEPs according to the underlying objects of their pointer
3993   // operands.
3994   for (Instruction &I : *BB) {
3995 
3996     // Ignore store instructions that are volatile or have a pointer operand
3997     // that doesn't point to a scalar type.
3998     if (auto *SI = dyn_cast<StoreInst>(&I)) {
3999       if (!SI->isSimple())
4000         continue;
4001       if (!isValidElementType(SI->getValueOperand()->getType()))
4002         continue;
4003       Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
4004     }
4005 
4006     // Ignore getelementptr instructions that have more than one index, a
4007     // constant index, or a pointer operand that doesn't point to a scalar
4008     // type.
4009     else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
4010       auto Idx = GEP->idx_begin()->get();
4011       if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
4012         continue;
4013       if (!isValidElementType(Idx->getType()))
4014         continue;
4015       if (GEP->getType()->isVectorTy())
4016         continue;
4017       GEPs[GetUnderlyingObject(GEP->getPointerOperand(), *DL)].push_back(GEP);
4018     }
4019   }
4020 }
4021 
4022 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
4023   if (!A || !B)
4024     return false;
4025   Value *VL[] = { A, B };
4026   return tryToVectorizeList(VL, R, None, true);
4027 }
4028 
4029 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
4030                                            ArrayRef<Value *> BuildVector,
4031                                            bool AllowReorder) {
4032   if (VL.size() < 2)
4033     return false;
4034 
4035   DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " << VL.size()
4036                << ".\n");
4037 
4038   // Check that all of the parts are scalar instructions of the same type.
4039   Instruction *I0 = dyn_cast<Instruction>(VL[0]);
4040   if (!I0)
4041     return false;
4042 
4043   unsigned Opcode0 = I0->getOpcode();
4044 
4045   unsigned Sz = R.getVectorElementSize(I0);
4046   unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz);
4047   unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
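  // For example, with a 128-bit minimum register, 32-bit elements, and six
  // values, MinVF = 4 and MaxVF = PowerOf2Floor(6) = 4 (illustrative
  // numbers).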
4048   if (MaxVF < 2)
4049     return false;
4050 
4051   for (Value *V : VL) {
4052     Type *Ty = V->getType();
4053     if (!isValidElementType(Ty))
4054       return false;
4055     Instruction *Inst = dyn_cast<Instruction>(V);
4056     if (!Inst || Inst->getOpcode() != Opcode0)
4057       return false;
4058   }
4059 
4060   bool Changed = false;
4061 
4062   // Keep track of values that were deleted by vectorizing in the loop below.
4063   SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());
4064 
4065   unsigned NextInst = 0, MaxInst = VL.size();
4066   for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF;
4067        VF /= 2) {
    // No actual vectorization should happen if the number of parts is the
    // same as the provided vectorization factor (i.e. the scalar type is used
    // for the vector code during codegen).
4071     auto *VecTy = VectorType::get(VL[0]->getType(), VF);
4072     if (TTI->getNumberOfParts(VecTy) == VF)
4073       continue;
4074     for (unsigned I = NextInst; I < MaxInst; ++I) {
4075       unsigned OpsWidth = 0;
4076 
4077       if (I + VF > MaxInst)
4078         OpsWidth = MaxInst - I;
4079       else
4080         OpsWidth = VF;
4081 
4082       if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
4083         break;
4084 
4085       // Check that a previous iteration of this loop did not delete the Value.
4086       if (hasValueBeenRAUWed(VL, TrackValues, I, OpsWidth))
4087         continue;
4088 
4089       DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
4090                    << "\n");
4091       ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
4092 
4093       ArrayRef<Value *> BuildVectorSlice;
4094       if (!BuildVector.empty())
4095         BuildVectorSlice = BuildVector.slice(I, OpsWidth);
4096 
4097       R.buildTree(Ops, BuildVectorSlice);
4098       // TODO: check if we can allow reordering for more cases.
4099       if (AllowReorder && R.shouldReorder()) {
4100         // Conceptually, there is nothing actually preventing us from trying to
4101         // reorder a larger list. In fact, we do exactly this when vectorizing
4102         // reductions. However, at this point, we only expect to get here from
4103         // tryToVectorizePair().
4104         assert(Ops.size() == 2);
4105         assert(BuildVectorSlice.empty());
4106         Value *ReorderedOps[] = {Ops[1], Ops[0]};
4107         R.buildTree(ReorderedOps, None);
4108       }
4109       if (R.isTreeTinyAndNotFullyVectorizable())
4110         continue;
4111 
4112       R.computeMinimumValueSizes();
4113       int Cost = R.getTreeCost();
4114 
4115       if (Cost < -SLPCostThreshold) {
4116         DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
4117         Value *VectorizedRoot = R.vectorizeTree();
4118 
4119         // Reconstruct the build vector by extracting the vectorized root. This
4120         // way we handle the case where some elements of the vector are
4121         // undefined.
        //  (return (insertelt <4 x i32> (insertelt undef (opd0) 0) (opd1) 2))
4123         if (!BuildVectorSlice.empty()) {
          // The insert point is the last build vector instruction. The
          // vectorized root will precede it. This guarantees that we get an
          // instruction to insert after, even if the vectorized tree was
          // constant folded.
4127           Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back());
4128           unsigned VecIdx = 0;
4129           for (auto &V : BuildVectorSlice) {
4130             IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
4131                                         ++BasicBlock::iterator(InsertAfter));
4132             Instruction *I = cast<Instruction>(V);
4133             assert(isa<InsertElementInst>(I) || isa<InsertValueInst>(I));
4134             Instruction *Extract =
4135                 cast<Instruction>(Builder.CreateExtractElement(
4136                     VectorizedRoot, Builder.getInt32(VecIdx++)));
4137             I->setOperand(1, Extract);
4138             I->removeFromParent();
4139             I->insertAfter(Extract);
4140             InsertAfter = I;
4141           }
4142         }
4143         // Move to the next bundle.
4144         I += VF - 1;
4145         NextInst = I + 1;
4146         Changed = true;
4147       }
4148     }
4149   }
4150 
4151   return Changed;
4152 }
4153 
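/// \brief Try to vectorize the operands of the binary operator \p V as a
/// pair. If that fails and one of the operands is a single-use binary
/// operator itself, try pairing the other operand with each operand of the
/// inner operator (e.g. for V = A + (B0 * B1), also try the pairs (A, B0)
/// and (A, B1)).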
4154 bool SLPVectorizerPass::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
4155   if (!V)
4156     return false;
4157 
4158   Value *P = V->getParent();
4159 
4160   // Vectorize in current basic block only.
4161   auto *Op0 = dyn_cast<Instruction>(V->getOperand(0));
4162   auto *Op1 = dyn_cast<Instruction>(V->getOperand(1));
4163   if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
4164     return false;
4165 
4166   // Try to vectorize V.
4167   if (tryToVectorizePair(Op0, Op1, R))
4168     return true;
4169 
4170   auto *A = dyn_cast<BinaryOperator>(Op0);
4171   auto *B = dyn_cast<BinaryOperator>(Op1);
4172   // Try to skip B.
4173   if (B && B->hasOneUse()) {
4174     auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
4175     auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
4176     if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R))
4177       return true;
4178     if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R))
4179       return true;
4180   }
4181 
4182   // Try to skip A.
4183   if (A && A->hasOneUse()) {
4184     auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
4185     auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
4186     if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R))
4187       return true;
4188     if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R))
4189       return true;
4190   }
4191   return false;
4192 }
4193 
4194 /// \brief Generate a shuffle mask to be used in a reduction tree.
4195 ///
4196 /// \param VecLen The length of the vector to be reduced.
4197 /// \param NumEltsToRdx The number of elements that should be reduced in the
4198 ///        vector.
4199 /// \param IsPairwise Whether the reduction is a pairwise or splitting
4200 ///        reduction. A pairwise reduction will generate a mask of
4201 ///        <0,2,...> or <1,3,..> while a splitting reduction will generate
4202 ///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
4203 /// \param IsLeft True will generate a mask of even elements, odd otherwise.
4204 static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
4205                                    bool IsPairwise, bool IsLeft,
4206                                    IRBuilder<> &Builder) {
4207   assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");
4208 
4209   SmallVector<Constant *, 32> ShuffleMask(
4210       VecLen, UndefValue::get(Builder.getInt32Ty()));
4211 
4212   if (IsPairwise)
4213     // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
4214     for (unsigned i = 0; i != NumEltsToRdx; ++i)
4215       ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
4216   else
4217     // Move the upper half of the vector to the lower half.
4218     for (unsigned i = 0; i != NumEltsToRdx; ++i)
4219       ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);
4220 
4221   return ConstantVector::get(ShuffleMask);
4222 }
4223 
4224 namespace {
4225 /// Model horizontal reductions.
4226 ///
4227 /// A horizontal reduction is a tree of reduction operations (currently add and
4228 /// fadd) that has operations that can be put into a vector as its leaf.
4229 /// For example, this tree:
4230 ///
4231 /// mul mul mul mul
4232 ///  \  /    \  /
4233 ///   +       +
4234 ///    \     /
4235 ///       +
4236 /// This tree has "mul" as its reduced values and "+" as its reduction
4237 /// operations. A reduction might be feeding into a store or a binary operation
4238 /// feeding a phi.
4239 ///    ...
4240 ///    \  /
4241 ///     +
4242 ///     |
4243 ///  phi +=
4244 ///
4245 ///  Or:
4246 ///    ...
4247 ///    \  /
4248 ///     +
4249 ///     |
4250 ///   *p =
4251 ///
4252 class HorizontalReduction {
4253   SmallVector<Value *, 16> ReductionOps;
4254   SmallVector<Value *, 32> ReducedVals;
  // Use a MapVector to make the output deterministic.
4256   MapVector<Instruction *, Value *> ExtraArgs;
4257 
4258   BinaryOperator *ReductionRoot = nullptr;
4259 
4260   /// The opcode of the reduction.
4261   Instruction::BinaryOps ReductionOpcode = Instruction::BinaryOpsEnd;
4262   /// The opcode of the values we perform a reduction on.
4263   unsigned ReducedValueOpcode = 0;
4264   /// Should we model this reduction as a pairwise reduction tree or a tree that
4265   /// splits the vector in halves and adds those halves.
4266   bool IsPairwiseReduction = false;
4267 
4268   /// Checks if the ParentStackElem.first should be marked as a reduction
4269   /// operation with an extra argument or as extra argument itself.
4270   void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem,
4271                     Value *ExtraArg) {
4272     if (ExtraArgs.count(ParentStackElem.first)) {
4273       ExtraArgs[ParentStackElem.first] = nullptr;
4274       // We ran into something like:
4275       // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg.
4276       // The whole ParentStackElem.first should be considered as an extra value
4277       // in this case.
4278       // Do not perform analysis of remaining operands of ParentStackElem.first
4279       // instruction, this whole instruction is an extra argument.
4280       ParentStackElem.second = ParentStackElem.first->getNumOperands();
4281     } else {
4282       // We ran into something like:
4283       // ParentStackElem.first += ... + ExtraArg + ...
4284       ExtraArgs[ParentStackElem.first] = ExtraArg;
4285     }
4286   }
4287 
4288 public:
4289   HorizontalReduction() = default;
4290 
4291   /// \brief Try to find a reduction tree.
4292   bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B) {
4293     assert((!Phi || is_contained(Phi->operands(), B)) &&
4294            "Thi phi needs to use the binary operator");
4295 
    // We could have an initial reduction that is not an add.
    //  r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted at the first '+'.
4299     if (Phi) {
4300       if (B->getOperand(0) == Phi) {
4301         Phi = nullptr;
4302         B = dyn_cast<BinaryOperator>(B->getOperand(1));
4303       } else if (B->getOperand(1) == Phi) {
4304         Phi = nullptr;
4305         B = dyn_cast<BinaryOperator>(B->getOperand(0));
4306       }
4307     }
4308 
4309     if (!B)
4310       return false;
4311 
4312     Type *Ty = B->getType();
4313     if (!isValidElementType(Ty))
4314       return false;
4315 
4316     ReductionOpcode = B->getOpcode();
4317     ReducedValueOpcode = 0;
4318     ReductionRoot = B;
4319 
4320     // We currently only support adds.
4321     if ((ReductionOpcode != Instruction::Add &&
4322          ReductionOpcode != Instruction::FAdd) ||
4323         !B->isAssociative())
4324       return false;
4325 
    // Post-order traverse the reduction tree starting at B. We only handle
    // true trees containing only binary operators or selects.
4328     SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
4329     Stack.push_back(std::make_pair(B, 0));
4330     while (!Stack.empty()) {
4331       Instruction *TreeN = Stack.back().first;
      unsigned EdgeToVisit = Stack.back().second++;
4333       bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;
4334 
      // Postorder visit.
      if (EdgeToVisit == 2 || IsReducedValue) {
4337         if (IsReducedValue)
4338           ReducedVals.push_back(TreeN);
4339         else {
4340           auto I = ExtraArgs.find(TreeN);
4341           if (I != ExtraArgs.end() && !I->second) {
4342             // Check if TreeN is an extra argument of its parent operation.
4343             if (Stack.size() <= 1) {
4344               // TreeN can't be an extra argument as it is a root reduction
4345               // operation.
4346               return false;
4347             }
4348             // Yes, TreeN is an extra argument, do not add it to a list of
4349             // reduction operations.
4350             // Stack[Stack.size() - 2] always points to the parent operation.
4351             markExtraArg(Stack[Stack.size() - 2], TreeN);
4352             ExtraArgs.erase(TreeN);
4353           } else
4354             ReductionOps.push_back(TreeN);
4355         }
4356         // Retract.
4357         Stack.pop_back();
4358         continue;
4359       }
4360 
4361       // Visit left or right.
      Value *NextV = TreeN->getOperand(EdgeToVisit);
4363       if (NextV != Phi) {
4364         auto *I = dyn_cast<Instruction>(NextV);
        // Continue analysis if the next operand is a reduction operation or
        // (possibly) a reduced value. If the reduced value opcode is not set
        // yet, the first operation we meet that is not the reduction
        // operation defines the reduced value class.
4369         if (I && (!ReducedValueOpcode || I->getOpcode() == ReducedValueOpcode ||
4370                   I->getOpcode() == ReductionOpcode)) {
4371           // Only handle trees in the current basic block.
4372           if (I->getParent() != B->getParent()) {
4373             // I is an extra argument for TreeN (its parent operation).
4374             markExtraArg(Stack.back(), I);
4375             continue;
4376           }
4377 
4378           // Each tree node needs to have one user except for the ultimate
4379           // reduction.
4380           if (!I->hasOneUse() && I != B) {
4381             // I is an extra argument for TreeN (its parent operation).
4382             markExtraArg(Stack.back(), I);
4383             continue;
4384           }
4385 
4386           if (I->getOpcode() == ReductionOpcode) {
4387             // We need to be able to reassociate the reduction operations.
4388             if (!I->isAssociative()) {
4389               // I is an extra argument for TreeN (its parent operation).
4390               markExtraArg(Stack.back(), I);
4391               continue;
4392             }
4393           } else if (ReducedValueOpcode &&
4394                      ReducedValueOpcode != I->getOpcode()) {
4395             // Make sure that the opcodes of the operations that we are going to
4396             // reduce match.
4397             // I is an extra argument for TreeN (its parent operation).
4398             markExtraArg(Stack.back(), I);
4399             continue;
4400           } else if (!ReducedValueOpcode)
4401             ReducedValueOpcode = I->getOpcode();
4402 
4403           Stack.push_back(std::make_pair(I, 0));
4404           continue;
4405         }
4406       }
4407       // NextV is an extra argument for TreeN (its parent operation).
4408       markExtraArg(Stack.back(), NextV);
4409     }
4410     return true;
4411   }
4412 
4413   /// \brief Attempt to vectorize the tree found by
4414   /// matchAssociativeReduction.
4415   bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
4416     if (ReducedVals.empty())
4417       return false;
4418 
    // If there is a sufficient number of reduction values, reduce
    // to a nearby power-of-2. We can safely generate oversized
    // vectors and rely on the backend to split them into legal sizes.
4422     unsigned NumReducedVals = ReducedVals.size();
4423     if (NumReducedVals < 4)
4424       return false;
4425 
4426     unsigned ReduxWidth = PowerOf2Floor(NumReducedVals);
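    // For example, with NumReducedVals = 7 we start at ReduxWidth = 4: one
    // 4-wide tree is attempted, after which the next width PowerOf2Floor(3)
    // = 2 fails the ReduxWidth > 2 check in the loop below, and the leftover
    // scalar values are folded into the result by the tail loop that follows.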
4427 
4428     Value *VectorizedTree = nullptr;
4429     IRBuilder<> Builder(ReductionRoot);
4430     FastMathFlags Unsafe;
4431     Unsafe.setUnsafeAlgebra();
4432     Builder.setFastMathFlags(Unsafe);
4433     unsigned i = 0;
4434 
4435     BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
    // The same extra argument may be used several times, so log each attempt
    // to use it.
4438     for (auto &Pair : ExtraArgs)
4439       ExternallyUsedValues[Pair.second].push_back(Pair.first);
4440     while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) {
4441       auto VL = makeArrayRef(&ReducedVals[i], ReduxWidth);
4442       V.buildTree(VL, ExternallyUsedValues, ReductionOps);
4443       if (V.shouldReorder()) {
4444         SmallVector<Value *, 8> Reversed(VL.rbegin(), VL.rend());
4445         V.buildTree(Reversed, ExternallyUsedValues, ReductionOps);
4446       }
4447       if (V.isTreeTinyAndNotFullyVectorizable())
4448         break;
4449 
4450       V.computeMinimumValueSizes();
4451 
4452       // Estimate cost.
4453       int Cost =
4454           V.getTreeCost() + getReductionCost(TTI, ReducedVals[i], ReduxWidth);
4455       if (Cost >= -SLPCostThreshold)
4456         break;
4457 
4458       DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
4459                    << ". (HorRdx)\n");
4460 
4461       // Vectorize a tree.
4462       DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
4463       Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);
4464 
4465       // Emit a reduction.
4466       Value *ReducedSubTree =
4467           emitReduction(VectorizedRoot, Builder, ReduxWidth, ReductionOps);
4468       if (VectorizedTree) {
4469         Builder.SetCurrentDebugLocation(Loc);
4470         VectorizedTree = Builder.CreateBinOp(ReductionOpcode, VectorizedTree,
4471                                              ReducedSubTree, "bin.rdx");
4472         propagateIRFlags(VectorizedTree, ReductionOps);
4473       } else
4474         VectorizedTree = ReducedSubTree;
4475       i += ReduxWidth;
4476       ReduxWidth = PowerOf2Floor(NumReducedVals - i);
4477     }
4478 
4479     if (VectorizedTree) {
4480       // Finish the reduction.
4481       for (; i < NumReducedVals; ++i) {
4482         auto *I = cast<Instruction>(ReducedVals[i]);
4483         Builder.SetCurrentDebugLocation(I->getDebugLoc());
4484         VectorizedTree =
4485             Builder.CreateBinOp(ReductionOpcode, VectorizedTree, I);
4486         propagateIRFlags(VectorizedTree, ReductionOps);
4487       }
4488       for (auto &Pair : ExternallyUsedValues) {
        assert(!Pair.second.empty() &&
               "At least one use of the extra argument must be recorded");
4491         // Add each externally used value to the final reduction.
4492         for (auto *I : Pair.second) {
4493           Builder.SetCurrentDebugLocation(I->getDebugLoc());
4494           VectorizedTree = Builder.CreateBinOp(ReductionOpcode, VectorizedTree,
4495                                                Pair.first, "bin.extra");
4496           propagateIRFlags(VectorizedTree, I);
4497         }
4498       }
4499       // Update users.
4500       ReductionRoot->replaceAllUsesWith(VectorizedTree);
4501     }
4502     return VectorizedTree != nullptr;
4503   }
4504 
4505   unsigned numReductionValues() const {
4506     return ReducedVals.size();
4507   }
4508 
4509 private:
4510   /// \brief Calculate the cost of a reduction.
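  /// The vector cost is the cheaper of the target's pairwise and splitting
  /// reduction costs; the scalar baseline it is compared against is
  /// (ReduxWidth - 1) scalar operations, e.g. seven scalar adds for an
  /// 8-wide reduction.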
4511   int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal,
4512                        unsigned ReduxWidth) {
4513     Type *ScalarTy = FirstReducedVal->getType();
4514     Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);
4515 
4516     int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true);
4517     int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false);
4518 
4519     IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
4520     int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;
4521 
4522     int ScalarReduxCost =
4523         (ReduxWidth - 1) *
4524         TTI->getArithmeticInstrCost(ReductionOpcode, ScalarTy);
4525 
4526     DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
4527                  << " for reduction that starts with " << *FirstReducedVal
4528                  << " (It is a "
4529                  << (IsPairwiseReduction ? "pairwise" : "splitting")
4530                  << " reduction)\n");
4531 
4532     return VecReduxCost - ScalarReduxCost;
4533   }
4534 
4535   /// \brief Emit a horizontal reduction of the vectorized value.
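  ///
  /// For a splitting reduction of width 4, the emitted sequence looks like
  /// (illustrative IR; value names are made up):
  ///   %rdx.shuf   = shufflevector <4 x float> %v, <4 x float> undef,
  ///                   <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  ///   %bin.rdx    = fadd <4 x float> %v, %rdx.shuf
  ///   %rdx.shuf.1 = shufflevector <4 x float> %bin.rdx, <4 x float> undef,
  ///                   <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  ///   %bin.rdx.1  = fadd <4 x float> %bin.rdx, %rdx.shuf.1
  ///   %res        = extractelement <4 x float> %bin.rdx.1, i32 0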
4536   Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder,
4537                        unsigned ReduxWidth, ArrayRef<Value *> RedOps) {
4538     assert(VectorizedValue && "Need to have a vectorized tree node");
4539     assert(isPowerOf2_32(ReduxWidth) &&
4540            "We only handle power-of-two reductions for now");
4541 
4542     Value *TmpVec = VectorizedValue;
4543     for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
4544       if (IsPairwiseReduction) {
4545         Value *LeftMask =
4546           createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
4547         Value *RightMask =
4548           createRdxShuffleMask(ReduxWidth, i, true, false, Builder);
4549 
4550         Value *LeftShuf = Builder.CreateShuffleVector(
4551           TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l");
4552         Value *RightShuf = Builder.CreateShuffleVector(
4553           TmpVec, UndefValue::get(TmpVec->getType()), (RightMask),
4554           "rdx.shuf.r");
4555         TmpVec = Builder.CreateBinOp(ReductionOpcode, LeftShuf, RightShuf,
4556                                      "bin.rdx");
4557       } else {
4558         Value *UpperHalf =
4559           createRdxShuffleMask(ReduxWidth, i, false, false, Builder);
4560         Value *Shuf = Builder.CreateShuffleVector(
4561           TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf");
4562         TmpVec = Builder.CreateBinOp(ReductionOpcode, TmpVec, Shuf, "bin.rdx");
4563       }
4564       propagateIRFlags(TmpVec, RedOps);
4565     }
4566 
4567     // The result is in the first element of the vector.
4568     return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
4569   }
4570 };
4571 } // end anonymous namespace
4572 
4573 /// \brief Recognize construction of vectors like
4574 ///  %ra = insertelement <4 x float> undef, float %s0, i32 0
4575 ///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
4576 ///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
4577 ///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
4578 ///
/// \returns true if it matches.
///
4581 static bool findBuildVector(InsertElementInst *FirstInsertElem,
4582                             SmallVectorImpl<Value *> &BuildVector,
4583                             SmallVectorImpl<Value *> &BuildVectorOpds) {
4584   if (!isa<UndefValue>(FirstInsertElem->getOperand(0)))
4585     return false;
4586 
4587   InsertElementInst *IE = FirstInsertElem;
4588   while (true) {
4589     BuildVector.push_back(IE);
4590     BuildVectorOpds.push_back(IE->getOperand(1));
4591 
4592     if (IE->use_empty())
4593       return false;
4594 
4595     InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back());
4596     if (!NextUse)
4597       return true;
4598 
    // If this isn't the final use, make sure the next insertelement is the
    // only use. It's OK if the final constructed vector is used multiple
    // times.
4601     if (!IE->hasOneUse())
4602       return false;
4603 
4604     IE = NextUse;
4605   }
4606 
4607   return false;
4608 }
4609 
/// \brief Like findBuildVector, but looks backwards for the construction of
/// an aggregate.
4611 ///
4612 /// \return true if it matches.
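///
/// For example (illustrative IR), starting from the last insertvalue:
///   %agg0 = insertvalue [2 x float] undef, float %a, 0
///   %agg1 = insertvalue [2 x float] %agg0, float %b, 1
/// walking backwards from %agg1 yields BuildVectorOpds = {%a, %b} after the
/// final reversal.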
4613 static bool findBuildAggregate(InsertValueInst *IV,
4614                                SmallVectorImpl<Value *> &BuildVector,
4615                                SmallVectorImpl<Value *> &BuildVectorOpds) {
4616   Value *V;
4617   do {
4618     BuildVector.push_back(IV);
4619     BuildVectorOpds.push_back(IV->getInsertedValueOperand());
4620     V = IV->getAggregateOperand();
4621     if (isa<UndefValue>(V))
4622       break;
4623     IV = dyn_cast<InsertValueInst>(V);
4624     if (!IV || !IV->hasOneUse())
4625       return false;
4626   } while (true);
4627   std::reverse(BuildVector.begin(), BuildVector.end());
4628   std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end());
4629   return true;
4630 }
4631 
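/// \brief Comparator that groups values of the same type together; the
/// relative order between different types is the (arbitrary but fixed for a
/// given run) pointer order of the type objects.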
4632 static bool PhiTypeSorterFunc(Value *V, Value *V2) {
4633   return V->getType() < V2->getType();
4634 }
4635 
4636 /// \brief Try and get a reduction value from a phi node.
4637 ///
4638 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions
4639 /// if they come from either \p ParentBB or a containing loop latch.
4640 ///
4641 /// \returns A candidate reduction value if possible, or \code nullptr \endcode
4642 /// if not possible.
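///
/// For example (illustrative IR), given the loop-carried phi
///   %sum = phi i32 [ 0, %entry ], [ %sum.next, %loop ]
/// with \p ParentBB being %loop, the incoming value %sum.next is returned as
/// the candidate reduction value, provided its defining block is dominated
/// by the phi's block.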
4643 static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
4644                                 BasicBlock *ParentBB, LoopInfo *LI) {
4645   // There are situations where the reduction value is not dominated by the
4646   // reduction phi. Vectorizing such cases has been reported to cause
4647   // miscompiles. See PR25787.
  auto DominatedReduxValue = [&](Value *R) {
    auto *I = dyn_cast<Instruction>(R);
    return I && DT->dominates(P->getParent(), I->getParent());
  };
4653 
4654   Value *Rdx = nullptr;
4655 
4656   // Return the incoming value if it comes from the same BB as the phi node.
4657   if (P->getIncomingBlock(0) == ParentBB) {
4658     Rdx = P->getIncomingValue(0);
4659   } else if (P->getIncomingBlock(1) == ParentBB) {
4660     Rdx = P->getIncomingValue(1);
4661   }
4662 
4663   if (Rdx && DominatedReduxValue(Rdx))
4664     return Rdx;
4665 
4666   // Otherwise, check whether we have a loop latch to look at.
4667   Loop *BBL = LI->getLoopFor(ParentBB);
4668   if (!BBL)
4669     return nullptr;
4670   BasicBlock *BBLatch = BBL->getLoopLatch();
4671   if (!BBLatch)
4672     return nullptr;
4673 
4674   // There is a loop latch, return the incoming value if it comes from
4675   // that. This reduction pattern occasionally turns up.
4676   if (P->getIncomingBlock(0) == BBLatch) {
4677     Rdx = P->getIncomingValue(0);
4678   } else if (P->getIncomingBlock(1) == BBLatch) {
4679     Rdx = P->getIncomingValue(1);
4680   }
4681 
4682   if (Rdx && DominatedReduxValue(Rdx))
4683     return Rdx;
4684 
4685   return nullptr;
4686 }
4687 
4688 namespace {
/// Tracks an instruction and its children.
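/// Used as a stack element for the iterative operand traversal in
/// canBeVectorized below: Level is the index of the next operand to visit,
/// and allUsesReplacedWith restarts the traversal whenever the tracked
/// instruction is replaced during vectorization.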
4690 class WeakVHWithLevel final : public CallbackVH {
  /// Operand index of the instruction currently being analyzed.
4692   unsigned Level = 0;
4693   /// Is this the instruction that should be vectorized, or are we now
4694   /// processing children (i.e. operands of this instruction) for potential
4695   /// vectorization?
4696   bool IsInitial = true;
4697 
4698 public:
4699   explicit WeakVHWithLevel() = default;
  WeakVHWithLevel(Value *V) : CallbackVH(V) {}
  /// Restart the children analysis each time the tracked instruction is
  /// replaced by a new one.
4702   void allUsesReplacedWith(Value *New) override {
4703     setValPtr(New);
4704     Level = 0;
4705     IsInitial = true;
4706   }
  /// Check that the tracked instruction was not deleted during vectorization.
  bool isValid() const { return getValPtr() != nullptr; }
  /// Should the instruction itself be vectorized, rather than its children?
  bool isInitial() const { return IsInitial; }
  /// Switch to processing the instruction's children (operands).
  void clearInitial() { IsInitial = false; }
4713   /// Are all children processed already?
4714   bool isFinal() const {
4715     assert(getValPtr() &&
4716            (isa<Instruction>(getValPtr()) &&
4717             cast<Instruction>(getValPtr())->getNumOperands() >= Level));
4718     return getValPtr() &&
4719            cast<Instruction>(getValPtr())->getNumOperands() == Level;
4720   }
4721   /// Get next child operation.
4722   Value *nextOperand() {
4723     assert(getValPtr() && isa<Instruction>(getValPtr()) &&
4724            cast<Instruction>(getValPtr())->getNumOperands() > Level);
4725     return cast<Instruction>(getValPtr())->getOperand(Level++);
4726   }
4727   virtual ~WeakVHWithLevel() = default;
4728 };
4729 } // namespace
4730 
/// \brief Attempt to match and reduce a horizontal reduction.
/// If it is legal to match a horizontal reduction feeding the phi node \p P
/// with the reduction operation rooted at \p Root in basic block \p BB, then
/// check if it can be done.
4735 /// \returns true if a horizontal reduction was matched and reduced.
4736 /// \returns false if a horizontal reduction was not matched.
4737 static bool canBeVectorized(
4738     PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
4739     TargetTransformInfo *TTI,
4740     const function_ref<bool(BinaryOperator *, BoUpSLP &)> Vectorize) {
4741   if (!ShouldVectorizeHor)
4742     return false;
4743 
4744   if (!Root)
4745     return false;
4746 
4747   if (Root->getParent() != BB)
4748     return false;
4749   SmallVector<WeakVHWithLevel, 8> Stack(1, Root);
4750   SmallSet<Value *, 8> VisitedInstrs;
4751   bool Res = false;
4752   while (!Stack.empty()) {
4753     Value *V = Stack.back();
4754     if (!V) {
4755       Stack.pop_back();
4756       continue;
4757     }
4758     auto *Inst = dyn_cast<Instruction>(V);
4759     if (!Inst || isa<PHINode>(Inst)) {
4760       Stack.pop_back();
4761       continue;
4762     }
4763     if (Stack.back().isInitial()) {
4764       Stack.back().clearInitial();
4765       if (auto *BI = dyn_cast<BinaryOperator>(Inst)) {
4766         HorizontalReduction HorRdx;
4767         if (HorRdx.matchAssociativeReduction(P, BI)) {
4768           if (HorRdx.tryToReduce(R, TTI)) {
4769             Res = true;
4770             P = nullptr;
4771             continue;
4772           }
4773         }
4774         if (P) {
4775           Inst = dyn_cast<Instruction>(BI->getOperand(0));
4776           if (Inst == P)
4777             Inst = dyn_cast<Instruction>(BI->getOperand(1));
4778           if (!Inst) {
4779             P = nullptr;
4780             continue;
4781           }
4782         }
4783       }
4784       P = nullptr;
4785       if (Vectorize(dyn_cast<BinaryOperator>(Inst), R)) {
4786         Res = true;
4787         continue;
4788       }
4789     }
4790     if (Stack.back().isFinal()) {
4791       Stack.pop_back();
4792       continue;
4793     }
4794 
4795     if (auto *NextV = dyn_cast<Instruction>(Stack.back().nextOperand()))
4796       if (NextV->getParent() == BB && VisitedInstrs.insert(NextV).second &&
4797           Stack.size() < RecursionMaxDepth)
4798         Stack.push_back(NextV);
4799   }
4800   return Res;
4801 }
4802 
4803 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
4804                                                  BasicBlock *BB, BoUpSLP &R,
4805                                                  TargetTransformInfo *TTI) {
4806   if (!V)
4807     return false;
4808   auto *I = dyn_cast<Instruction>(V);
4809   if (!I)
4810     return false;
4811 
4812   if (!isa<BinaryOperator>(I))
4813     P = nullptr;
4814   // Try to match and vectorize a horizontal reduction.
4815   return canBeVectorized(P, I, BB, R, TTI,
4816                          [this](BinaryOperator *BI, BoUpSLP &R) -> bool {
4817                            return tryToVectorize(BI, R);
4818                          });
4819 }
4820 
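/// \brief Vectorize chains in the basic block \p BB: groups of same-typed
/// PHIs, horizontal reductions rooted at PHIs, stores, returns and compares,
/// and insertelement/insertvalue sequences.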
4821 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
4822   bool Changed = false;
4823   SmallVector<Value *, 4> Incoming;
4824   SmallSet<Value *, 16> VisitedInstrs;
4825 
4826   bool HaveVectorizedPhiNodes = true;
4827   while (HaveVectorizedPhiNodes) {
4828     HaveVectorizedPhiNodes = false;
4829 
4830     // Collect the incoming values from the PHIs.
4831     Incoming.clear();
4832     for (Instruction &I : *BB) {
4833       PHINode *P = dyn_cast<PHINode>(&I);
4834       if (!P)
4835         break;
4836 
4837       if (!VisitedInstrs.count(P))
4838         Incoming.push_back(P);
4839     }
4840 
4841     // Sort by type.
4842     std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);
4843 
    // Try to vectorize elements based on their type.
4845     for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
4846                                            E = Incoming.end();
4847          IncIt != E;) {
4848 
4849       // Look for the next elements with the same type.
4850       SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
4851       while (SameTypeIt != E &&
4852              (*SameTypeIt)->getType() == (*IncIt)->getType()) {
4853         VisitedInstrs.insert(*SameTypeIt);
4854         ++SameTypeIt;
4855       }
4856 
4857       // Try to vectorize them.
4858       unsigned NumElts = (SameTypeIt - IncIt);
      DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts
                   << ")\n");
4860       if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R)) {
        // Success: start over because instructions might have been changed.
4862         HaveVectorizedPhiNodes = true;
4863         Changed = true;
4864         break;
4865       }
4866 
4867       // Start over at the next instruction of a different type (or the end).
4868       IncIt = SameTypeIt;
4869     }
4870   }
4871 
4872   VisitedInstrs.clear();
4873 
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // We may go through BB multiple times, so skip instructions we have
    // already checked.
4876     if (!VisitedInstrs.insert(&*it).second)
4877       continue;
4878 
4879     if (isa<DbgInfoIntrinsic>(it))
4880       continue;
4881 
4882     // Try to vectorize reductions that use PHINodes.
4883     if (PHINode *P = dyn_cast<PHINode>(it)) {
4884       // Check that the PHI is a reduction PHI.
4885       if (P->getNumIncomingValues() != 2)
4886         return Changed;
4887 
4888       // Try to match and vectorize a horizontal reduction.
4889       if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
4890                                    TTI)) {
4891         Changed = true;
4892         it = BB->begin();
4893         e = BB->end();
4894         continue;
4895       }
4896       continue;
4897     }
4898 
4899     if (ShouldStartVectorizeHorAtStore) {
4900       if (StoreInst *SI = dyn_cast<StoreInst>(it)) {
4901         // Try to match and vectorize a horizontal reduction.
4902         if (vectorizeRootInstruction(nullptr, SI->getValueOperand(), BB, R,
4903                                      TTI)) {
4904           Changed = true;
4905           it = BB->begin();
4906           e = BB->end();
4907           continue;
4908         }
4909       }
4910     }
4911 
4912     // Try to vectorize horizontal reductions feeding into a return.
4913     if (ReturnInst *RI = dyn_cast<ReturnInst>(it)) {
4914       if (RI->getNumOperands() != 0) {
4915         // Try to match and vectorize a horizontal reduction.
4916         if (vectorizeRootInstruction(nullptr, RI->getOperand(0), BB, R, TTI)) {
4917           Changed = true;
4918           it = BB->begin();
4919           e = BB->end();
4920           continue;
4921         }
4922       }
4923     }
4924 
4925     // Try to vectorize trees that start at compare instructions.
4926     if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
4927       if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
4928         Changed = true;
        // We would like to start over since some instructions are deleted
        // and the iterator may be invalidated.
4931         it = BB->begin();
4932         e = BB->end();
4933         continue;
4934       }
4935 
4936       for (int I = 0; I < 2; ++I) {
4937         if (vectorizeRootInstruction(nullptr, CI->getOperand(I), BB, R, TTI)) {
4938           Changed = true;
          // We would like to start over since some instructions are deleted
          // and the iterator may be invalidated.
4941           it = BB->begin();
4942           e = BB->end();
4943           break;
4944         }
4945       }
4946       continue;
4947     }
4948 
4949     // Try to vectorize trees that start at insertelement instructions.
4950     if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) {
4951       SmallVector<Value *, 16> BuildVector;
4952       SmallVector<Value *, 16> BuildVectorOpds;
4953       if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds))
4954         continue;
4955 
4956       // Vectorize starting with the build vector operands ignoring the
4957       // BuildVector instructions for the purpose of scheduling and user
4958       // extraction.
4959       if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) {
4960         Changed = true;
4961         it = BB->begin();
4962         e = BB->end();
4963       }
4964 
4965       continue;
4966     }
4967 
4968     // Try to vectorize trees that start at insertvalue instructions feeding into
4969     // a store.
4970     if (StoreInst *SI = dyn_cast<StoreInst>(it)) {
      if (InsertValueInst *LastInsertValue =
              dyn_cast<InsertValueInst>(SI->getValueOperand())) {
        const DataLayout &DL = BB->getModule()->getDataLayout();
        if (R.canMapToVector(SI->getValueOperand()->getType(), DL)) {
          SmallVector<Value *, 16> BuildVector;
          SmallVector<Value *, 16> BuildVectorOpds;
          if (!findBuildAggregate(LastInsertValue, BuildVector,
                                  BuildVectorOpds))
            continue;

          DEBUG(dbgs() << "SLP: store of array mappable to vector: " << *SI
                       << "\n");
4980           if (tryToVectorizeList(BuildVectorOpds, R, BuildVector, false)) {
4981             Changed = true;
4982             it = BB->begin();
4983             e = BB->end();
4984           }
4985           continue;
4986         }
4987       }
4988     }
4989   }
4990 
4991   return Changed;
4992 }
4993 
4994 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
4995   auto Changed = false;
4996   for (auto &Entry : GEPs) {
4997 
4998     // If the getelementptr list has fewer than two elements, there's nothing
4999     // to do.
5000     if (Entry.second.size() < 2)
5001       continue;
5002 
5003     DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
5004                  << Entry.second.size() << ".\n");
5005 
5006     // We process the getelementptr list in chunks of 16 (like we do for
5007     // stores) to minimize compile-time.
5008     for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += 16) {
5009       auto Len = std::min<unsigned>(BE - BI, 16);
5010       auto GEPList = makeArrayRef(&Entry.second[BI], Len);
5011 
      // Initialize a set of candidate getelementptrs. Note that we use a
5013       // SetVector here to preserve program order. If the index computations
5014       // are vectorizable and begin with loads, we want to minimize the chance
5015       // of having to reorder them later.
5016       SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());
5017 
5018       // Some of the candidates may have already been vectorized after we
5019       // initially collected them. If so, the WeakVHs will have nullified the
5020       // values, so remove them from the set of candidates.
5021       Candidates.remove(nullptr);
5022 
5023       // Remove from the set of candidates all pairs of getelementptrs with
5024       // constant differences. Such getelementptrs are likely not good
5025       // candidates for vectorization in a bottom-up phase since one can be
5026       // computed from the other. We also ensure all candidate getelementptr
5027       // indices are unique.
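      // For example (illustrative), if GEPList contains "gep %a, %i" and
      // "gep %a, (add %i, 1)", their SCEV difference is the constant 1 and
      // both are removed from the candidate set.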
5028       for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
5029         auto *GEPI = cast<GetElementPtrInst>(GEPList[I]);
5030         if (!Candidates.count(GEPI))
5031           continue;
5032         auto *SCEVI = SE->getSCEV(GEPList[I]);
5033         for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
5034           auto *GEPJ = cast<GetElementPtrInst>(GEPList[J]);
5035           auto *SCEVJ = SE->getSCEV(GEPList[J]);
5036           if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
5037             Candidates.remove(GEPList[I]);
5038             Candidates.remove(GEPList[J]);
5039           } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
5040             Candidates.remove(GEPList[J]);
5041           }
5042         }
5043       }
5044 
5045       // We break out of the above computation as soon as we know there are
5046       // fewer than two candidates remaining.
5047       if (Candidates.size() < 2)
5048         continue;
5049 
5050       // Add the single, non-constant index of each candidate to the bundle. We
5051       // ensured the indices met these constraints when we originally collected
5052       // the getelementptrs.
5053       SmallVector<Value *, 16> Bundle(Candidates.size());
5054       auto BundleIndex = 0u;
5055       for (auto *V : Candidates) {
5056         auto *GEP = cast<GetElementPtrInst>(V);
5057         auto *GEPIdx = GEP->idx_begin()->get();
5058         assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
5059         Bundle[BundleIndex++] = GEPIdx;
5060       }
5061 
5062       // Try and vectorize the indices. We are currently only interested in
5063       // gather-like cases of the form:
5064       //
5065       // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
5066       //
5067       // where the loads of "a", the loads of "b", and the subtractions can be
5068       // performed in parallel. It's likely that detecting this pattern in a
5069       // bottom-up phase will be simpler and less costly than building a
5070       // full-blown top-down phase beginning at the consecutive loads.
5071       Changed |= tryToVectorizeList(Bundle, R);
5072     }
5073   }
5074   return Changed;
5075 }
5076 
5077 bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
5078   bool Changed = false;
5079   // Attempt to sort and vectorize each of the store-groups.
5080   for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e;
5081        ++it) {
5082     if (it->second.size() < 2)
5083       continue;
5084 
5085     DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
5086           << it->second.size() << ".\n");
5087 
5088     // Process the stores in chunks of 16.
5089     // TODO: The limit of 16 inhibits greater vectorization factors.
5090     //       For example, AVX2 supports v32i8. Increasing this limit, however,
5091     //       may cause a significant compile-time increase.
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
5093       unsigned Len = std::min<unsigned>(CE - CI, 16);
5094       Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len), R);
5095     }
5096   }
5097   return Changed;
5098 }
5099 
5100 char SLPVectorizer::ID = 0;
5101 static const char lv_name[] = "SLP Vectorizer";
5102 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
5103 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
5104 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
5105 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
5106 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
5107 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
5108 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
5109 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)
5110 
5111 namespace llvm {
5112 Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
5113 }
5114