1 //===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 // This pass implements the Bottom Up SLP vectorizer. It detects consecutive
10 // stores that can be put together into vector-stores. Next, it attempts to
11 // construct vectorizable tree using the use-def chains. If a profitable tree
12 // was found, the SLP vectorizer performs vectorization on the tree.
13 //
14 // The pass is inspired by the work described in the paper:
15 //  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
16 //
17 //===----------------------------------------------------------------------===//
18 #include "llvm/Transforms/Vectorize/SLPVectorizer.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/PostOrderIterator.h"
21 #include "llvm/ADT/SetVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/CodeMetrics.h"
24 #include "llvm/Analysis/GlobalsModRef.h"
25 #include "llvm/Analysis/LoopAccessAnalysis.h"
26 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/Analysis/VectorUtils.h"
29 #include "llvm/IR/DataLayout.h"
30 #include "llvm/IR/Dominators.h"
31 #include "llvm/IR/IRBuilder.h"
32 #include "llvm/IR/Instructions.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/IR/Module.h"
35 #include "llvm/IR/NoFolder.h"
36 #include "llvm/IR/Type.h"
37 #include "llvm/IR/Value.h"
38 #include "llvm/IR/Verifier.h"
39 #include "llvm/Pass.h"
40 #include "llvm/Support/CommandLine.h"
41 #include "llvm/Support/Debug.h"
42 #include "llvm/Support/raw_ostream.h"
43 #include "llvm/Transforms/Vectorize.h"
44 #include <algorithm>
45 #include <memory>
46 
47 using namespace llvm;
48 using namespace slpvectorizer;
49 
50 #define SV_NAME "slp-vectorizer"
51 #define DEBUG_TYPE "SLP"
52 
53 STATISTIC(NumVectorInstructions, "Number of vector instructions generated");
54 
55 static cl::opt<int>
56     SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
57                      cl::desc("Only vectorize if you gain more than this "
58                               "number "));
59 
60 static cl::opt<bool>
61 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
62                    cl::desc("Attempt to vectorize horizontal reductions"));
63 
64 static cl::opt<bool> ShouldStartVectorizeHorAtStore(
65     "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
66     cl::desc(
67         "Attempt to vectorize horizontal reductions feeding into a store"));
68 
69 static cl::opt<int>
70 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
71     cl::desc("Attempt to vectorize for this register size in bits"));
72 
73 /// Limits the size of scheduling regions in a block.
74 /// It avoid long compile times for _very_ large blocks where vector
75 /// instructions are spread over a wide range.
76 /// This limit is way higher than needed by real-world functions.
77 static cl::opt<int>
78 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
79     cl::desc("Limit the size of the SLP scheduling region per block"));
80 
81 static cl::opt<int> MinVectorRegSizeOption(
82     "slp-min-reg-size", cl::init(128), cl::Hidden,
83     cl::desc("Attempt to vectorize for this register size in bits"));
84 
85 static cl::opt<unsigned> RecursionMaxDepth(
86     "slp-recursion-max-depth", cl::init(12), cl::Hidden,
87     cl::desc("Limit the recursion depth when building a vectorizable tree"));
88 
89 static cl::opt<unsigned> MinTreeSize(
90     "slp-min-tree-size", cl::init(3), cl::Hidden,
91     cl::desc("Only vectorize small trees if they are fully vectorizable"));
92 
93 // Limit the number of alias checks. The limit is chosen so that
94 // it has no negative effect on the llvm benchmarks.
95 static const unsigned AliasedCheckLimit = 10;
96 
97 // Another limit for the alias checks: The maximum distance between load/store
98 // instructions where alias checks are done.
99 // This limit is useful for very large basic blocks.
100 static const unsigned MaxMemDepDistance = 160;
101 
102 /// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
103 /// regions to be handled.
104 static const int MinScheduleRegionSize = 16;
105 
106 /// \brief Predicate for the element types that the SLP vectorizer supports.
107 ///
108 /// The most important thing to filter here are types which are invalid in LLVM
109 /// vectors. We also filter target specific types which have absolutely no
110 /// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
111 /// avoids spending time checking the cost model and realizing that they will
112 /// be inevitably scalarized.
113 static bool isValidElementType(Type *Ty) {
114   return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
115          !Ty->isPPC_FP128Ty();
116 }
117 
118 /// \returns true if all of the instructions in \p VL are in the same block or
119 /// false otherwise.
120 static bool allSameBlock(ArrayRef<Value *> VL) {
121   Instruction *I0 = dyn_cast<Instruction>(VL[0]);
122   if (!I0)
123     return false;
124   BasicBlock *BB = I0->getParent();
125   for (int i = 1, e = VL.size(); i < e; i++) {
126     Instruction *I = dyn_cast<Instruction>(VL[i]);
127     if (!I)
128       return false;
129 
130     if (BB != I->getParent())
131       return false;
132   }
133   return true;
134 }
135 
136 /// \returns True if all of the values in \p VL are constants.
137 static bool allConstant(ArrayRef<Value *> VL) {
138   for (Value *i : VL)
139     if (!isa<Constant>(i))
140       return false;
141   return true;
142 }
143 
144 /// \returns True if all of the values in \p VL are identical.
145 static bool isSplat(ArrayRef<Value *> VL) {
146   for (unsigned i = 1, e = VL.size(); i < e; ++i)
147     if (VL[i] != VL[0])
148       return false;
149   return true;
150 }
151 
152 ///\returns Opcode that can be clubbed with \p Op to create an alternate
153 /// sequence which can later be merged as a ShuffleVector instruction.
154 static unsigned getAltOpcode(unsigned Op) {
155   switch (Op) {
156   case Instruction::FAdd:
157     return Instruction::FSub;
158   case Instruction::FSub:
159     return Instruction::FAdd;
160   case Instruction::Add:
161     return Instruction::Sub;
162   case Instruction::Sub:
163     return Instruction::Add;
164   default:
165     return 0;
166   }
167 }
168 
169 ///\returns bool representing if Opcode \p Op can be part
170 /// of an alternate sequence which can later be merged as
171 /// a ShuffleVector instruction.
172 static bool canCombineAsAltInst(unsigned Op) {
173   return Op == Instruction::FAdd || Op == Instruction::FSub ||
174          Op == Instruction::Sub || Op == Instruction::Add;
175 }
176 
177 /// \returns ShuffleVector instruction if instructions in \p VL have
178 ///  alternate fadd,fsub / fsub,fadd/add,sub/sub,add sequence.
179 /// (i.e. e.g. opcodes of fadd,fsub,fadd,fsub...)
180 static unsigned isAltInst(ArrayRef<Value *> VL) {
181   Instruction *I0 = dyn_cast<Instruction>(VL[0]);
182   unsigned Opcode = I0->getOpcode();
183   unsigned AltOpcode = getAltOpcode(Opcode);
184   for (int i = 1, e = VL.size(); i < e; i++) {
185     Instruction *I = dyn_cast<Instruction>(VL[i]);
186     if (!I || I->getOpcode() != ((i & 1) ? AltOpcode : Opcode))
187       return 0;
188   }
189   return Instruction::ShuffleVector;
190 }
191 
192 /// \returns The opcode if all of the Instructions in \p VL have the same
193 /// opcode, or zero.
194 static unsigned getSameOpcode(ArrayRef<Value *> VL) {
195   Instruction *I0 = dyn_cast<Instruction>(VL[0]);
196   if (!I0)
197     return 0;
198   unsigned Opcode = I0->getOpcode();
199   for (int i = 1, e = VL.size(); i < e; i++) {
200     Instruction *I = dyn_cast<Instruction>(VL[i]);
201     if (!I || Opcode != I->getOpcode()) {
202       if (canCombineAsAltInst(Opcode) && i == 1)
203         return isAltInst(VL);
204       return 0;
205     }
206   }
207   return Opcode;
208 }
209 
210 /// Get the intersection (logical and) of all of the potential IR flags
211 /// of each scalar operation (VL) that will be converted into a vector (I).
212 /// Flag set: NSW, NUW, exact, and all of fast-math.
213 static void propagateIRFlags(Value *I, ArrayRef<Value *> VL) {
214   if (auto *VecOp = dyn_cast<Instruction>(I)) {
215     if (auto *Intersection = dyn_cast<Instruction>(VL[0])) {
216       // Intersection is initialized to the 0th scalar,
217       // so start counting from index '1'.
218       for (int i = 1, e = VL.size(); i < e; ++i) {
219         if (auto *Scalar = dyn_cast<Instruction>(VL[i]))
220           Intersection->andIRFlags(Scalar);
221       }
222       VecOp->copyIRFlags(Intersection);
223     }
224   }
225 }
226 
227 /// \returns true if all of the values in \p VL have the same type or false
228 /// otherwise.
229 static bool allSameType(ArrayRef<Value *> VL) {
230   Type *Ty = VL[0]->getType();
231   for (int i = 1, e = VL.size(); i < e; i++)
232     if (VL[i]->getType() != Ty)
233       return false;
234 
235   return true;
236 }
237 
238 /// \returns True if Extract{Value,Element} instruction extracts element Idx.
239 static bool matchExtractIndex(Instruction *E, unsigned Idx, unsigned Opcode) {
240   assert(Opcode == Instruction::ExtractElement ||
241          Opcode == Instruction::ExtractValue);
242   if (Opcode == Instruction::ExtractElement) {
243     ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));
244     return CI && CI->getZExtValue() == Idx;
245   } else {
246     ExtractValueInst *EI = cast<ExtractValueInst>(E);
247     return EI->getNumIndices() == 1 && *EI->idx_begin() == Idx;
248   }
249 }
250 
251 /// \returns True if in-tree use also needs extract. This refers to
252 /// possible scalar operand in vectorized instruction.
253 static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
254                                     TargetLibraryInfo *TLI) {
255 
256   unsigned Opcode = UserInst->getOpcode();
257   switch (Opcode) {
258   case Instruction::Load: {
259     LoadInst *LI = cast<LoadInst>(UserInst);
260     return (LI->getPointerOperand() == Scalar);
261   }
262   case Instruction::Store: {
263     StoreInst *SI = cast<StoreInst>(UserInst);
264     return (SI->getPointerOperand() == Scalar);
265   }
266   case Instruction::Call: {
267     CallInst *CI = cast<CallInst>(UserInst);
268     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
269     if (hasVectorInstrinsicScalarOpd(ID, 1)) {
270       return (CI->getArgOperand(1) == Scalar);
271     }
272   }
273   default:
274     return false;
275   }
276 }
277 
278 /// \returns the AA location that is being access by the instruction.
279 static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
280   if (StoreInst *SI = dyn_cast<StoreInst>(I))
281     return MemoryLocation::get(SI);
282   if (LoadInst *LI = dyn_cast<LoadInst>(I))
283     return MemoryLocation::get(LI);
284   return MemoryLocation();
285 }
286 
287 /// \returns True if the instruction is not a volatile or atomic load/store.
288 static bool isSimple(Instruction *I) {
289   if (LoadInst *LI = dyn_cast<LoadInst>(I))
290     return LI->isSimple();
291   if (StoreInst *SI = dyn_cast<StoreInst>(I))
292     return SI->isSimple();
293   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
294     return !MI->isVolatile();
295   return true;
296 }
297 
298 namespace llvm {
299 namespace slpvectorizer {
300 /// Bottom Up SLP Vectorizer.
301 class BoUpSLP {
302 public:
303   typedef SmallVector<Value *, 8> ValueList;
304   typedef SmallVector<Instruction *, 16> InstrList;
305   typedef SmallPtrSet<Value *, 16> ValueSet;
306   typedef SmallVector<StoreInst *, 8> StoreList;
307 
308   BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
309           TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
310           DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
311           const DataLayout *DL)
312       : NumLoadsWantToKeepOrder(0), NumLoadsWantToChangeOrder(0), F(Func),
313         SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC), DB(DB),
314         DL(DL), Builder(Se->getContext()) {
315     CodeMetrics::collectEphemeralValues(F, AC, EphValues);
316     // Use the vector register size specified by the target unless overridden
317     // by a command-line option.
318     // TODO: It would be better to limit the vectorization factor based on
319     //       data type rather than just register size. For example, x86 AVX has
320     //       256-bit registers, but it does not support integer operations
321     //       at that width (that requires AVX2).
322     if (MaxVectorRegSizeOption.getNumOccurrences())
323       MaxVecRegSize = MaxVectorRegSizeOption;
324     else
325       MaxVecRegSize = TTI->getRegisterBitWidth(true);
326 
327     MinVecRegSize = MinVectorRegSizeOption;
328   }
329 
330   /// \brief Vectorize the tree that starts with the elements in \p VL.
331   /// Returns the vectorized root.
332   Value *vectorizeTree();
333   /// Vectorize the tree but with the list of externally used values \p
334   /// ExternallyUsedValues. Values in this MapVector can be replaced but the
335   /// generated extractvalue instructions.
336   Value *vectorizeTree(MapVector<Value *, DebugLoc> &ExternallyUsedValues);
337 
338   /// \returns the cost incurred by unwanted spills and fills, caused by
339   /// holding live values over call sites.
340   int getSpillCost();
341 
342   /// \returns the vectorization cost of the subtree that starts at \p VL.
343   /// A negative number means that this is profitable.
344   int getTreeCost();
345 
346   /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
347   /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
348   void buildTree(ArrayRef<Value *> Roots,
349                  ArrayRef<Value *> UserIgnoreLst = None);
350   /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
351   /// the purpose of scheduling and extraction in the \p UserIgnoreLst taking
352   /// into account (anf updating it, if required) list of externally used
353   /// values stored in \p ExternallyUsedValues.
354   void buildTree(ArrayRef<Value *> Roots,
355                  MapVector<Value *, DebugLoc> &ExternallyUsedValues,
356                  ArrayRef<Value *> UserIgnoreLst = None);
357 
358   /// Clear the internal data structures that are created by 'buildTree'.
359   void deleteTree() {
360     VectorizableTree.clear();
361     ScalarToTreeEntry.clear();
362     MustGather.clear();
363     ExternalUses.clear();
364     NumLoadsWantToKeepOrder = 0;
365     NumLoadsWantToChangeOrder = 0;
366     for (auto &Iter : BlocksSchedules) {
367       BlockScheduling *BS = Iter.second.get();
368       BS->clear();
369     }
370     MinBWs.clear();
371   }
372 
373   /// \brief Perform LICM and CSE on the newly generated gather sequences.
374   void optimizeGatherSequence();
375 
376   /// \returns true if it is beneficial to reverse the vector order.
377   bool shouldReorder() const {
378     return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
379   }
380 
381   /// \return The vector element size in bits to use when vectorizing the
382   /// expression tree ending at \p V. If V is a store, the size is the width of
383   /// the stored value. Otherwise, the size is the width of the largest loaded
384   /// value reaching V. This method is used by the vectorizer to calculate
385   /// vectorization factors.
386   unsigned getVectorElementSize(Value *V);
387 
388   /// Compute the minimum type sizes required to represent the entries in a
389   /// vectorizable tree.
390   void computeMinimumValueSizes();
391 
392   // \returns maximum vector register size as set by TTI or overridden by cl::opt.
393   unsigned getMaxVecRegSize() const {
394     return MaxVecRegSize;
395   }
396 
397   // \returns minimum vector register size as set by cl::opt.
398   unsigned getMinVecRegSize() const {
399     return MinVecRegSize;
400   }
401 
402   /// \brief Check if ArrayType or StructType is isomorphic to some VectorType.
403   ///
404   /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
405   unsigned canMapToVector(Type *T, const DataLayout &DL) const;
406 
407   /// \returns True if the VectorizableTree is both tiny and not fully
408   /// vectorizable. We do not vectorize such trees.
409   bool isTreeTinyAndNotFullyVectorizable();
410 
411 private:
412   struct TreeEntry;
413 
414   /// \returns the cost of the vectorizable entry.
415   int getEntryCost(TreeEntry *E);
416 
417   /// This is the recursive part of buildTree.
418   void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);
419 
420   /// \returns True if the ExtractElement/ExtractValue instructions in VL can
421   /// be vectorized to use the original vector (or aggregate "bitcast" to a vector).
422   bool canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const;
423 
424   /// Vectorize a single entry in the tree. VL icontains all isomorphic scalars
425   /// in order of its usage in a user program, for example ADD1, ADD2 and so on
426   /// or LOAD1 , LOAD2 etc.
427   Value *vectorizeTree(ArrayRef<Value *> VL, TreeEntry *E);
428 
429   /// Vectorize a single entry in the tree, starting in \p VL.
430   Value *vectorizeTree(ArrayRef<Value *> VL);
431 
432   /// \returns the pointer to the vectorized value if \p VL is already
433   /// vectorized, or NULL. They may happen in cycles.
434   Value *alreadyVectorized(ArrayRef<Value *> VL) const;
435 
436   /// \returns the scalarization cost for this type. Scalarization in this
437   /// context means the creation of vectors from a group of scalars.
438   int getGatherCost(Type *Ty);
439 
440   /// \returns the scalarization cost for this list of values. Assuming that
441   /// this subtree gets vectorized, we may need to extract the values from the
442   /// roots. This method calculates the cost of extracting the values.
443   int getGatherCost(ArrayRef<Value *> VL);
444 
445   /// \brief Set the Builder insert point to one after the last instruction in
446   /// the bundle
447   void setInsertPointAfterBundle(ArrayRef<Value *> VL);
448 
449   /// \returns a vector from a collection of scalars in \p VL.
450   Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);
451 
452   /// \returns whether the VectorizableTree is fully vectorizable and will
453   /// be beneficial even the tree height is tiny.
454   bool isFullyVectorizableTinyTree();
455 
456   /// \reorder commutative operands in alt shuffle if they result in
457   ///  vectorized code.
458   void reorderAltShuffleOperands(ArrayRef<Value *> VL,
459                                  SmallVectorImpl<Value *> &Left,
460                                  SmallVectorImpl<Value *> &Right);
461   /// \reorder commutative operands to get better probability of
462   /// generating vectorized code.
463   void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
464                                       SmallVectorImpl<Value *> &Left,
465                                       SmallVectorImpl<Value *> &Right);
466   struct TreeEntry {
467     TreeEntry() : Scalars(), VectorizedValue(nullptr),
468     NeedToGather(0), NeedToShuffle(0) {}
469 
470     /// \returns true if the scalars in VL are equal to this entry.
471     bool isSame(ArrayRef<Value *> VL) const {
472       assert(VL.size() == Scalars.size() && "Invalid size");
473       return std::equal(VL.begin(), VL.end(), Scalars.begin());
474     }
475 
476     /// \returns true if the scalars in VL are found in this tree entry.
477     bool isFoundJumbled(ArrayRef<Value *> VL, const DataLayout &DL,
478                         ScalarEvolution &SE) const {
479       assert(VL.size() == Scalars.size() && "Invalid size");
480       SmallVector<Value *, 8> List;
481       if (!sortMemAccesses(VL, DL, SE, List))
482         return false;
483 
484       return std::equal(List.begin(), List.end(), Scalars.begin());
485     }
486 
487     /// A vector of scalars.
488     ValueList Scalars;
489 
490     /// The Scalars are vectorized into this value. It is initialized to Null.
491     Value *VectorizedValue;
492 
493     /// Do we need to gather this sequence ?
494     bool NeedToGather;
495 
496     /// Do we need to shuffle the load ?
497     bool NeedToShuffle;
498   };
499 
500   /// Create a new VectorizableTree entry.
501   TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized,
502                           bool NeedToShuffle) {
503     VectorizableTree.emplace_back();
504     int idx = VectorizableTree.size() - 1;
505     TreeEntry *Last = &VectorizableTree[idx];
506     Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
507     Last->NeedToGather = !Vectorized;
508     Last->NeedToShuffle = NeedToShuffle;
509     if (Vectorized) {
510       for (int i = 0, e = VL.size(); i != e; ++i) {
511         assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
512         ScalarToTreeEntry[VL[i]] = idx;
513       }
514     } else {
515       MustGather.insert(VL.begin(), VL.end());
516     }
517     return Last;
518   }
519 
520   /// -- Vectorization State --
521   /// Holds all of the tree entries.
522   std::vector<TreeEntry> VectorizableTree;
523 
524   /// Maps a specific scalar to its tree entry.
525   SmallDenseMap<Value*, int> ScalarToTreeEntry;
526 
527   /// A list of scalars that we found that we need to keep as scalars.
528   ValueSet MustGather;
529 
530   /// This POD struct describes one external user in the vectorized tree.
531   struct ExternalUser {
532     ExternalUser (Value *S, llvm::User *U, int L) :
533       Scalar(S), User(U), Lane(L){}
534     // Which scalar in our function.
535     Value *Scalar;
536     // Which user that uses the scalar.
537     llvm::User *User;
538     // Which lane does the scalar belong to.
539     int Lane;
540   };
541   typedef SmallVector<ExternalUser, 16> UserList;
542 
543   /// Checks if two instructions may access the same memory.
544   ///
545   /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
546   /// is invariant in the calling loop.
547   bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
548                  Instruction *Inst2) {
549 
550     // First check if the result is already in the cache.
551     AliasCacheKey key = std::make_pair(Inst1, Inst2);
552     Optional<bool> &result = AliasCache[key];
553     if (result.hasValue()) {
554       return result.getValue();
555     }
556     MemoryLocation Loc2 = getLocation(Inst2, AA);
557     bool aliased = true;
558     if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
559       // Do the alias check.
560       aliased = AA->alias(Loc1, Loc2);
561     }
562     // Store the result in the cache.
563     result = aliased;
564     return aliased;
565   }
566 
567   typedef std::pair<Instruction *, Instruction *> AliasCacheKey;
568 
569   /// Cache for alias results.
570   /// TODO: consider moving this to the AliasAnalysis itself.
571   DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
572 
573   /// Removes an instruction from its block and eventually deletes it.
574   /// It's like Instruction::eraseFromParent() except that the actual deletion
575   /// is delayed until BoUpSLP is destructed.
576   /// This is required to ensure that there are no incorrect collisions in the
577   /// AliasCache, which can happen if a new instruction is allocated at the
578   /// same address as a previously deleted instruction.
579   void eraseInstruction(Instruction *I) {
580     I->removeFromParent();
581     I->dropAllReferences();
582     DeletedInstructions.push_back(std::unique_ptr<Instruction>(I));
583   }
584 
585   /// Temporary store for deleted instructions. Instructions will be deleted
586   /// eventually when the BoUpSLP is destructed.
587   SmallVector<std::unique_ptr<Instruction>, 8> DeletedInstructions;
588 
589   /// A list of values that need to extracted out of the tree.
590   /// This list holds pairs of (Internal Scalar : External User). External User
591   /// can be nullptr, it means that this Internal Scalar will be used later,
592   /// after vectorization.
593   UserList ExternalUses;
594 
595   /// Values used only by @llvm.assume calls.
596   SmallPtrSet<const Value *, 32> EphValues;
597 
598   /// Holds all of the instructions that we gathered.
599   SetVector<Instruction *> GatherSeq;
600   /// A list of blocks that we are going to CSE.
601   SetVector<BasicBlock *> CSEBlocks;
602 
603   /// Contains all scheduling relevant data for an instruction.
604   /// A ScheduleData either represents a single instruction or a member of an
605   /// instruction bundle (= a group of instructions which is combined into a
606   /// vector instruction).
607   struct ScheduleData {
608 
609     // The initial value for the dependency counters. It means that the
610     // dependencies are not calculated yet.
611     enum { InvalidDeps = -1 };
612 
613     ScheduleData()
614         : Inst(nullptr), FirstInBundle(nullptr), NextInBundle(nullptr),
615           NextLoadStore(nullptr), SchedulingRegionID(0), SchedulingPriority(0),
616           Dependencies(InvalidDeps), UnscheduledDeps(InvalidDeps),
617           UnscheduledDepsInBundle(InvalidDeps), IsScheduled(false) {}
618 
619     void init(int BlockSchedulingRegionID) {
620       FirstInBundle = this;
621       NextInBundle = nullptr;
622       NextLoadStore = nullptr;
623       IsScheduled = false;
624       SchedulingRegionID = BlockSchedulingRegionID;
625       UnscheduledDepsInBundle = UnscheduledDeps;
626       clearDependencies();
627     }
628 
629     /// Returns true if the dependency information has been calculated.
630     bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
631 
632     /// Returns true for single instructions and for bundle representatives
633     /// (= the head of a bundle).
634     bool isSchedulingEntity() const { return FirstInBundle == this; }
635 
636     /// Returns true if it represents an instruction bundle and not only a
637     /// single instruction.
638     bool isPartOfBundle() const {
639       return NextInBundle != nullptr || FirstInBundle != this;
640     }
641 
642     /// Returns true if it is ready for scheduling, i.e. it has no more
643     /// unscheduled depending instructions/bundles.
644     bool isReady() const {
645       assert(isSchedulingEntity() &&
646              "can't consider non-scheduling entity for ready list");
647       return UnscheduledDepsInBundle == 0 && !IsScheduled;
648     }
649 
650     /// Modifies the number of unscheduled dependencies, also updating it for
651     /// the whole bundle.
652     int incrementUnscheduledDeps(int Incr) {
653       UnscheduledDeps += Incr;
654       return FirstInBundle->UnscheduledDepsInBundle += Incr;
655     }
656 
657     /// Sets the number of unscheduled dependencies to the number of
658     /// dependencies.
659     void resetUnscheduledDeps() {
660       incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
661     }
662 
663     /// Clears all dependency information.
664     void clearDependencies() {
665       Dependencies = InvalidDeps;
666       resetUnscheduledDeps();
667       MemoryDependencies.clear();
668     }
669 
670     void dump(raw_ostream &os) const {
671       if (!isSchedulingEntity()) {
672         os << "/ " << *Inst;
673       } else if (NextInBundle) {
674         os << '[' << *Inst;
675         ScheduleData *SD = NextInBundle;
676         while (SD) {
677           os << ';' << *SD->Inst;
678           SD = SD->NextInBundle;
679         }
680         os << ']';
681       } else {
682         os << *Inst;
683       }
684     }
685 
686     Instruction *Inst;
687 
688     /// Points to the head in an instruction bundle (and always to this for
689     /// single instructions).
690     ScheduleData *FirstInBundle;
691 
692     /// Single linked list of all instructions in a bundle. Null if it is a
693     /// single instruction.
694     ScheduleData *NextInBundle;
695 
696     /// Single linked list of all memory instructions (e.g. load, store, call)
697     /// in the block - until the end of the scheduling region.
698     ScheduleData *NextLoadStore;
699 
700     /// The dependent memory instructions.
701     /// This list is derived on demand in calculateDependencies().
702     SmallVector<ScheduleData *, 4> MemoryDependencies;
703 
704     /// This ScheduleData is in the current scheduling region if this matches
705     /// the current SchedulingRegionID of BlockScheduling.
706     int SchedulingRegionID;
707 
708     /// Used for getting a "good" final ordering of instructions.
709     int SchedulingPriority;
710 
711     /// The number of dependencies. Constitutes of the number of users of the
712     /// instruction plus the number of dependent memory instructions (if any).
713     /// This value is calculated on demand.
714     /// If InvalidDeps, the number of dependencies is not calculated yet.
715     ///
716     int Dependencies;
717 
718     /// The number of dependencies minus the number of dependencies of scheduled
719     /// instructions. As soon as this is zero, the instruction/bundle gets ready
720     /// for scheduling.
721     /// Note that this is negative as long as Dependencies is not calculated.
722     int UnscheduledDeps;
723 
724     /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for
725     /// single instructions.
726     int UnscheduledDepsInBundle;
727 
728     /// True if this instruction is scheduled (or considered as scheduled in the
729     /// dry-run).
730     bool IsScheduled;
731   };
732 
733 #ifndef NDEBUG
734   friend inline raw_ostream &operator<<(raw_ostream &os,
735                                         const BoUpSLP::ScheduleData &SD) {
736     SD.dump(os);
737     return os;
738   }
739 #endif
740 
741   /// Contains all scheduling data for a basic block.
742   ///
743   struct BlockScheduling {
744 
745     BlockScheduling(BasicBlock *BB)
746         : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize),
747           ScheduleStart(nullptr), ScheduleEnd(nullptr),
748           FirstLoadStoreInRegion(nullptr), LastLoadStoreInRegion(nullptr),
749           ScheduleRegionSize(0),
750           ScheduleRegionSizeLimit(ScheduleRegionSizeBudget),
751           // Make sure that the initial SchedulingRegionID is greater than the
752           // initial SchedulingRegionID in ScheduleData (which is 0).
753           SchedulingRegionID(1) {}
754 
755     void clear() {
756       ReadyInsts.clear();
757       ScheduleStart = nullptr;
758       ScheduleEnd = nullptr;
759       FirstLoadStoreInRegion = nullptr;
760       LastLoadStoreInRegion = nullptr;
761 
762       // Reduce the maximum schedule region size by the size of the
763       // previous scheduling run.
764       ScheduleRegionSizeLimit -= ScheduleRegionSize;
765       if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
766         ScheduleRegionSizeLimit = MinScheduleRegionSize;
767       ScheduleRegionSize = 0;
768 
769       // Make a new scheduling region, i.e. all existing ScheduleData is not
770       // in the new region yet.
771       ++SchedulingRegionID;
772     }
773 
774     ScheduleData *getScheduleData(Value *V) {
775       ScheduleData *SD = ScheduleDataMap[V];
776       if (SD && SD->SchedulingRegionID == SchedulingRegionID)
777         return SD;
778       return nullptr;
779     }
780 
781     bool isInSchedulingRegion(ScheduleData *SD) {
782       return SD->SchedulingRegionID == SchedulingRegionID;
783     }
784 
785     /// Marks an instruction as scheduled and puts all dependent ready
786     /// instructions into the ready-list.
787     template <typename ReadyListType>
788     void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
789       SD->IsScheduled = true;
790       DEBUG(dbgs() << "SLP:   schedule " << *SD << "\n");
791 
792       ScheduleData *BundleMember = SD;
793       while (BundleMember) {
794         // Handle the def-use chain dependencies.
795         for (Use &U : BundleMember->Inst->operands()) {
796           ScheduleData *OpDef = getScheduleData(U.get());
797           if (OpDef && OpDef->hasValidDependencies() &&
798               OpDef->incrementUnscheduledDeps(-1) == 0) {
799             // There are no more unscheduled dependencies after decrementing,
800             // so we can put the dependent instruction into the ready list.
801             ScheduleData *DepBundle = OpDef->FirstInBundle;
802             assert(!DepBundle->IsScheduled &&
803                    "already scheduled bundle gets ready");
804             ReadyList.insert(DepBundle);
805             DEBUG(dbgs() << "SLP:    gets ready (def): " << *DepBundle << "\n");
806           }
807         }
808         // Handle the memory dependencies.
809         for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
810           if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
811             // There are no more unscheduled dependencies after decrementing,
812             // so we can put the dependent instruction into the ready list.
813             ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
814             assert(!DepBundle->IsScheduled &&
815                    "already scheduled bundle gets ready");
816             ReadyList.insert(DepBundle);
817             DEBUG(dbgs() << "SLP:    gets ready (mem): " << *DepBundle << "\n");
818           }
819         }
820         BundleMember = BundleMember->NextInBundle;
821       }
822     }
823 
824     /// Put all instructions into the ReadyList which are ready for scheduling.
825     template <typename ReadyListType>
826     void initialFillReadyList(ReadyListType &ReadyList) {
827       for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
828         ScheduleData *SD = getScheduleData(I);
829         if (SD->isSchedulingEntity() && SD->isReady()) {
830           ReadyList.insert(SD);
831           DEBUG(dbgs() << "SLP:    initially in ready list: " << *I << "\n");
832         }
833       }
834     }
835 
836     /// Checks if a bundle of instructions can be scheduled, i.e. has no
837     /// cyclic dependencies. This is only a dry-run, no instructions are
838     /// actually moved at this stage.
839     bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP);
840 
841     /// Un-bundles a group of instructions.
842     void cancelScheduling(ArrayRef<Value *> VL);
843 
844     /// Extends the scheduling region so that V is inside the region.
845     /// \returns true if the region size is within the limit.
846     bool extendSchedulingRegion(Value *V);
847 
848     /// Initialize the ScheduleData structures for new instructions in the
849     /// scheduling region.
850     void initScheduleData(Instruction *FromI, Instruction *ToI,
851                           ScheduleData *PrevLoadStore,
852                           ScheduleData *NextLoadStore);
853 
854     /// Updates the dependency information of a bundle and of all instructions/
855     /// bundles which depend on the original bundle.
856     void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
857                                BoUpSLP *SLP);
858 
859     /// Sets all instruction in the scheduling region to un-scheduled.
860     void resetSchedule();
861 
862     BasicBlock *BB;
863 
864     /// Simple memory allocation for ScheduleData.
865     std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
866 
867     /// The size of a ScheduleData array in ScheduleDataChunks.
868     int ChunkSize;
869 
870     /// The allocator position in the current chunk, which is the last entry
871     /// of ScheduleDataChunks.
872     int ChunkPos;
873 
874     /// Attaches ScheduleData to Instruction.
875     /// Note that the mapping survives during all vectorization iterations, i.e.
876     /// ScheduleData structures are recycled.
877     DenseMap<Value *, ScheduleData *> ScheduleDataMap;
878 
879     struct ReadyList : SmallVector<ScheduleData *, 8> {
880       void insert(ScheduleData *SD) { push_back(SD); }
881     };
882 
883     /// The ready-list for scheduling (only used for the dry-run).
884     ReadyList ReadyInsts;
885 
886     /// The first instruction of the scheduling region.
887     Instruction *ScheduleStart;
888 
889     /// The first instruction _after_ the scheduling region.
890     Instruction *ScheduleEnd;
891 
892     /// The first memory accessing instruction in the scheduling region
893     /// (can be null).
894     ScheduleData *FirstLoadStoreInRegion;
895 
896     /// The last memory accessing instruction in the scheduling region
897     /// (can be null).
898     ScheduleData *LastLoadStoreInRegion;
899 
900     /// The current size of the scheduling region.
901     int ScheduleRegionSize;
902 
903     /// The maximum size allowed for the scheduling region.
904     int ScheduleRegionSizeLimit;
905 
906     /// The ID of the scheduling region. For a new vectorization iteration this
907     /// is incremented which "removes" all ScheduleData from the region.
908     int SchedulingRegionID;
909   };
910 
911   /// Attaches the BlockScheduling structures to basic blocks.
912   MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
913 
914   /// Performs the "real" scheduling. Done before vectorization is actually
915   /// performed in a basic block.
916   void scheduleBlock(BlockScheduling *BS);
917 
918   /// List of users to ignore during scheduling and that don't need extracting.
919   ArrayRef<Value *> UserIgnoreList;
920 
921   // Number of load bundles that contain consecutive loads.
922   int NumLoadsWantToKeepOrder;
923 
924   // Number of load bundles that contain consecutive loads in reversed order.
925   int NumLoadsWantToChangeOrder;
926 
927   // Analysis and block reference.
928   Function *F;
929   ScalarEvolution *SE;
930   TargetTransformInfo *TTI;
931   TargetLibraryInfo *TLI;
932   AliasAnalysis *AA;
933   LoopInfo *LI;
934   DominatorTree *DT;
935   AssumptionCache *AC;
936   DemandedBits *DB;
937   const DataLayout *DL;
938   unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
939   unsigned MinVecRegSize; // Set by cl::opt (default: 128).
940   /// Instruction builder to construct the vectorized tree.
941   IRBuilder<> Builder;
942 
943   /// A map of scalar integer values to the smallest bit width with which they
944   /// can legally be represented. The values map to (width, signed) pairs,
945   /// where "width" indicates the minimum bit width and "signed" is True if the
946   /// value must be signed-extended, rather than zero-extended, back to its
947   /// original width.
948   MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
949 };
950 
951 } // end namespace llvm
952 } // end namespace slpvectorizer
953 
954 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
955                         ArrayRef<Value *> UserIgnoreLst) {
956   MapVector<Value *, DebugLoc> ExternallyUsedValues;
957   buildTree(Roots, ExternallyUsedValues, UserIgnoreLst);
958 }
959 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
960                         MapVector<Value *, DebugLoc> &ExternallyUsedValues,
961                         ArrayRef<Value *> UserIgnoreLst) {
962   deleteTree();
963   UserIgnoreList = UserIgnoreLst;
964   if (!allSameType(Roots))
965     return;
966   buildTree_rec(Roots, 0);
967 
968   // Collect the values that we need to extract from the tree.
969   for (TreeEntry &EIdx : VectorizableTree) {
970     TreeEntry *Entry = &EIdx;
971 
972     // For each lane:
973     for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
974       Value *Scalar = Entry->Scalars[Lane];
975 
976       // No need to handle users of gathered values.
977       if (Entry->NeedToGather)
978         continue;
979 
980       // Check if the scalar is externally used as an extra arg.
981       auto ExtI = ExternallyUsedValues.find(Scalar);
982       if (ExtI != ExternallyUsedValues.end()) {
983         DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " <<
984               Lane << " from " << *Scalar << ".\n");
985         ExternalUses.emplace_back(Scalar, nullptr, Lane);
986         continue;
987       }
988       for (User *U : Scalar->users()) {
989         DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
990 
991         Instruction *UserInst = dyn_cast<Instruction>(U);
992         if (!UserInst)
993           continue;
994 
995         // Skip in-tree scalars that become vectors
996         if (ScalarToTreeEntry.count(U)) {
997           int Idx = ScalarToTreeEntry[U];
998           TreeEntry *UseEntry = &VectorizableTree[Idx];
999           Value *UseScalar = UseEntry->Scalars[0];
1000           // Some in-tree scalars will remain as scalar in vectorized
1001           // instructions. If that is the case, the one in Lane 0 will
1002           // be used.
1003           if (UseScalar != U ||
1004               !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
1005             DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
1006                          << ".\n");
1007             assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
1008             continue;
1009           }
1010         }
1011 
1012         // Ignore users in the user ignore list.
1013         if (is_contained(UserIgnoreList, UserInst))
1014           continue;
1015 
1016         DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
1017               Lane << " from " << *Scalar << ".\n");
1018         ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
1019       }
1020     }
1021   }
1022 }
1023 
1024 
1025 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
1026   bool isAltShuffle = false;
1027   assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
1028 
1029   if (Depth == RecursionMaxDepth) {
1030     DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
1031     newTreeEntry(VL, false, false);
1032     return;
1033   }
1034 
1035   // Don't handle vectors.
1036   if (VL[0]->getType()->isVectorTy()) {
1037     DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
1038     newTreeEntry(VL, false, false);
1039     return;
1040   }
1041 
1042   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
1043     if (SI->getValueOperand()->getType()->isVectorTy()) {
1044       DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
1045       newTreeEntry(VL, false, false);
1046       return;
1047     }
1048   unsigned Opcode = getSameOpcode(VL);
1049 
1050   // Check that this shuffle vector refers to the alternate
1051   // sequence of opcodes.
1052   if (Opcode == Instruction::ShuffleVector) {
1053     Instruction *I0 = dyn_cast<Instruction>(VL[0]);
1054     unsigned Op = I0->getOpcode();
1055     if (Op != Instruction::ShuffleVector)
1056       isAltShuffle = true;
1057   }
1058 
1059   // If all of the operands are identical or constant we have a simple solution.
1060   if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !Opcode) {
1061     DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
1062     newTreeEntry(VL, false, false);
1063     return;
1064   }
1065 
1066   // We now know that this is a vector of instructions of the same type from
1067   // the same block.
1068 
1069   // Don't vectorize ephemeral values.
1070   for (unsigned i = 0, e = VL.size(); i != e; ++i) {
1071     if (EphValues.count(VL[i])) {
1072       DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
1073             ") is ephemeral.\n");
1074       newTreeEntry(VL, false, false);
1075       return;
1076     }
1077   }
1078 
1079   // Check if this is a duplicate of another entry.
1080   if (ScalarToTreeEntry.count(VL[0])) {
1081     int Idx = ScalarToTreeEntry[VL[0]];
1082     TreeEntry *E = &VectorizableTree[Idx];
1083     for (unsigned i = 0, e = VL.size(); i != e; ++i) {
1084       DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
1085       if (E->Scalars[i] != VL[i]) {
1086         DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
1087         newTreeEntry(VL, false, false);
1088         return;
1089       }
1090     }
1091     DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
1092     return;
1093   }
1094 
1095   // Check that none of the instructions in the bundle are already in the tree.
1096   for (unsigned i = 0, e = VL.size(); i != e; ++i) {
1097     if (ScalarToTreeEntry.count(VL[i])) {
1098       DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
1099             ") is already in tree.\n");
1100       newTreeEntry(VL, false, false);
1101       return;
1102     }
1103   }
1104 
1105   // If any of the scalars is marked as a value that needs to stay scalar then
1106   // we need to gather the scalars.
1107   for (unsigned i = 0, e = VL.size(); i != e; ++i) {
1108     if (MustGather.count(VL[i])) {
1109       DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
1110       newTreeEntry(VL, false, false);
1111       return;
1112     }
1113   }
1114 
1115   // Check that all of the users of the scalars that we want to vectorize are
1116   // schedulable.
1117   Instruction *VL0 = cast<Instruction>(VL[0]);
1118   BasicBlock *BB = cast<Instruction>(VL0)->getParent();
1119 
1120   if (!DT->isReachableFromEntry(BB)) {
1121     // Don't go into unreachable blocks. They may contain instructions with
1122     // dependency cycles which confuse the final scheduling.
1123     DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
1124     newTreeEntry(VL, false, false);
1125     return;
1126   }
1127 
1128   // Check that every instructions appears once in this bundle.
1129   for (unsigned i = 0, e = VL.size(); i < e; ++i)
1130     for (unsigned j = i+1; j < e; ++j)
1131       if (VL[i] == VL[j]) {
1132         DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
1133         newTreeEntry(VL, false, false);
1134         return;
1135       }
1136 
1137   auto &BSRef = BlocksSchedules[BB];
1138   if (!BSRef) {
1139     BSRef = llvm::make_unique<BlockScheduling>(BB);
1140   }
1141   BlockScheduling &BS = *BSRef.get();
1142 
1143   if (!BS.tryScheduleBundle(VL, this)) {
1144     DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
1145     assert((!BS.getScheduleData(VL[0]) ||
1146             !BS.getScheduleData(VL[0])->isPartOfBundle()) &&
1147            "tryScheduleBundle should cancelScheduling on failure");
1148     newTreeEntry(VL, false, false);
1149     return;
1150   }
1151   DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
1152 
1153   switch (Opcode) {
1154     case Instruction::PHI: {
1155       PHINode *PH = dyn_cast<PHINode>(VL0);
1156 
1157       // Check for terminator values (e.g. invoke).
1158       for (unsigned j = 0; j < VL.size(); ++j)
1159         for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
1160           TerminatorInst *Term = dyn_cast<TerminatorInst>(
1161               cast<PHINode>(VL[j])->getIncomingValueForBlock(PH->getIncomingBlock(i)));
1162           if (Term) {
1163             DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
1164             BS.cancelScheduling(VL);
1165             newTreeEntry(VL, false, false);
1166             return;
1167           }
1168         }
1169 
1170       newTreeEntry(VL, true, false);
1171       DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
1172 
1173       for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
1174         ValueList Operands;
1175         // Prepare the operand vector.
1176         for (Value *j : VL)
1177           Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock(
1178               PH->getIncomingBlock(i)));
1179 
1180         buildTree_rec(Operands, Depth + 1);
1181       }
1182       return;
1183     }
1184     case Instruction::ExtractValue:
1185     case Instruction::ExtractElement: {
1186       bool Reuse = canReuseExtract(VL, Opcode);
1187       if (Reuse) {
1188         DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
1189       } else {
1190         BS.cancelScheduling(VL);
1191       }
1192       newTreeEntry(VL, Reuse, false);
1193       return;
1194     }
1195     case Instruction::Load: {
1196       // Check that a vectorized load would load the same memory as a scalar
1197       // load.
1198       // For example we don't want vectorize loads that are smaller than 8 bit.
1199       // Even though we have a packed struct {<i2, i2, i2, i2>} LLVM treats
1200       // loading/storing it as an i8 struct. If we vectorize loads/stores from
1201       // such a struct we read/write packed bits disagreeing with the
1202       // unvectorized version.
1203       Type *ScalarTy = VL[0]->getType();
1204 
1205       if (DL->getTypeSizeInBits(ScalarTy) !=
1206           DL->getTypeAllocSizeInBits(ScalarTy)) {
1207         BS.cancelScheduling(VL);
1208         newTreeEntry(VL, false, false);
1209         DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
1210         return;
1211       }
1212 
1213       // Make sure all loads in the bundle are simple - we can't vectorize
1214       // atomic or volatile loads.
1215       for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
1216         LoadInst *L = cast<LoadInst>(VL[i]);
1217         if (!L->isSimple()) {
1218           BS.cancelScheduling(VL);
1219           newTreeEntry(VL, false, false);
1220           DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
1221           return;
1222         }
1223       }
1224 
1225       // Check if the loads are consecutive, reversed, or neither.
1226       bool Consecutive = true;
1227       bool ReverseConsecutive = true;
1228       for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
1229         if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
1230           Consecutive = false;
1231           break;
1232         } else {
1233           ReverseConsecutive = false;
1234         }
1235       }
1236 
1237       if (Consecutive) {
1238         ++NumLoadsWantToKeepOrder;
1239         newTreeEntry(VL, true, false);
1240         DEBUG(dbgs() << "SLP: added a vector of loads.\n");
1241         return;
1242       }
1243 
1244       // If none of the load pairs were consecutive when checked in order,
1245       // check the reverse order.
1246       if (ReverseConsecutive)
1247         for (unsigned i = VL.size() - 1; i > 0; --i)
1248           if (!isConsecutiveAccess(VL[i], VL[i - 1], *DL, *SE)) {
1249             ReverseConsecutive = false;
1250             break;
1251           }
1252 
1253       if (VL.size() > 2 && !ReverseConsecutive) {
1254         bool ShuffledLoads = true;
1255         SmallVector<Value *, 8> Sorted;
1256         if (sortMemAccesses(VL, *DL, *SE, Sorted)) {
1257           auto NewVL = makeArrayRef(Sorted.begin(), Sorted.end());
1258           for (unsigned i = 0, e = NewVL.size() - 1; i < e; ++i) {
1259             if (!isConsecutiveAccess(NewVL[i], NewVL[i + 1], *DL, *SE)) {
1260               ShuffledLoads = false;
1261               break;
1262             }
1263           }
1264           if (ShuffledLoads) {
1265             newTreeEntry(NewVL, true, true);
1266             return;
1267           }
1268         }
1269       }
1270 
1271       BS.cancelScheduling(VL);
1272       newTreeEntry(VL, false, false);
1273 
1274       if (ReverseConsecutive) {
1275         ++NumLoadsWantToChangeOrder;
1276         DEBUG(dbgs() << "SLP: Gathering reversed loads.\n");
1277       } else {
1278         DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
1279       }
1280       return;
1281     }
1282     case Instruction::ZExt:
1283     case Instruction::SExt:
1284     case Instruction::FPToUI:
1285     case Instruction::FPToSI:
1286     case Instruction::FPExt:
1287     case Instruction::PtrToInt:
1288     case Instruction::IntToPtr:
1289     case Instruction::SIToFP:
1290     case Instruction::UIToFP:
1291     case Instruction::Trunc:
1292     case Instruction::FPTrunc:
1293     case Instruction::BitCast: {
1294       Type *SrcTy = VL0->getOperand(0)->getType();
1295       for (Value *Val : VL) {
1296         Type *Ty = cast<Instruction>(Val)->getOperand(0)->getType();
1297         if (Ty != SrcTy || !isValidElementType(Ty)) {
1298           BS.cancelScheduling(VL);
1299           newTreeEntry(VL, false, false);
1300           DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
1301           return;
1302         }
1303       }
1304       newTreeEntry(VL, true, false);
1305       DEBUG(dbgs() << "SLP: added a vector of casts.\n");
1306 
1307       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1308         ValueList Operands;
1309         // Prepare the operand vector.
1310         for (Value *j : VL)
1311           Operands.push_back(cast<Instruction>(j)->getOperand(i));
1312 
1313         buildTree_rec(Operands, Depth+1);
1314       }
1315       return;
1316     }
1317     case Instruction::ICmp:
1318     case Instruction::FCmp: {
1319       // Check that all of the compares have the same predicate.
1320       CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
1321       Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
1322       for (unsigned i = 1, e = VL.size(); i < e; ++i) {
1323         CmpInst *Cmp = cast<CmpInst>(VL[i]);
1324         if (Cmp->getPredicate() != P0 ||
1325             Cmp->getOperand(0)->getType() != ComparedTy) {
1326           BS.cancelScheduling(VL);
1327           newTreeEntry(VL, false, false);
1328           DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
1329           return;
1330         }
1331       }
1332 
1333       newTreeEntry(VL, true, false);
1334       DEBUG(dbgs() << "SLP: added a vector of compares.\n");
1335 
1336       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1337         ValueList Operands;
1338         // Prepare the operand vector.
1339         for (Value *j : VL)
1340           Operands.push_back(cast<Instruction>(j)->getOperand(i));
1341 
1342         buildTree_rec(Operands, Depth+1);
1343       }
1344       return;
1345     }
1346     case Instruction::Select:
1347     case Instruction::Add:
1348     case Instruction::FAdd:
1349     case Instruction::Sub:
1350     case Instruction::FSub:
1351     case Instruction::Mul:
1352     case Instruction::FMul:
1353     case Instruction::UDiv:
1354     case Instruction::SDiv:
1355     case Instruction::FDiv:
1356     case Instruction::URem:
1357     case Instruction::SRem:
1358     case Instruction::FRem:
1359     case Instruction::Shl:
1360     case Instruction::LShr:
1361     case Instruction::AShr:
1362     case Instruction::And:
1363     case Instruction::Or:
1364     case Instruction::Xor: {
1365       newTreeEntry(VL, true, false);
1366       DEBUG(dbgs() << "SLP: added a vector of bin op.\n");
1367 
1368       // Sort operands of the instructions so that each side is more likely to
1369       // have the same opcode.
1370       if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
1371         ValueList Left, Right;
1372         reorderInputsAccordingToOpcode(VL, Left, Right);
1373         buildTree_rec(Left, Depth + 1);
1374         buildTree_rec(Right, Depth + 1);
1375         return;
1376       }
1377 
1378       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1379         ValueList Operands;
1380         // Prepare the operand vector.
1381         for (Value *j : VL)
1382           Operands.push_back(cast<Instruction>(j)->getOperand(i));
1383 
1384         buildTree_rec(Operands, Depth+1);
1385       }
1386       return;
1387     }
1388     case Instruction::GetElementPtr: {
1389       // We don't combine GEPs with complicated (nested) indexing.
1390       for (Value *Val : VL) {
1391         if (cast<Instruction>(Val)->getNumOperands() != 2) {
1392           DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
1393           BS.cancelScheduling(VL);
1394           newTreeEntry(VL, false, false);
1395           return;
1396         }
1397       }
1398 
1399       // We can't combine several GEPs into one vector if they operate on
1400       // different types.
1401       Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
1402       for (Value *Val : VL) {
1403         Type *CurTy = cast<Instruction>(Val)->getOperand(0)->getType();
1404         if (Ty0 != CurTy) {
1405           DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
1406           BS.cancelScheduling(VL);
1407           newTreeEntry(VL, false, false);
1408           return;
1409         }
1410       }
1411 
1412       // We don't combine GEPs with non-constant indexes.
1413       for (Value *Val : VL) {
1414         auto Op = cast<Instruction>(Val)->getOperand(1);
1415         if (!isa<ConstantInt>(Op)) {
1416           DEBUG(
1417               dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
1418           BS.cancelScheduling(VL);
1419           newTreeEntry(VL, false, false);
1420           return;
1421         }
1422       }
1423 
1424       newTreeEntry(VL, true, false);
1425       DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
1426       for (unsigned i = 0, e = 2; i < e; ++i) {
1427         ValueList Operands;
1428         // Prepare the operand vector.
1429         for (Value *j : VL)
1430           Operands.push_back(cast<Instruction>(j)->getOperand(i));
1431 
1432         buildTree_rec(Operands, Depth + 1);
1433       }
1434       return;
1435     }
1436     case Instruction::Store: {
1437       // Check if the stores are consecutive or of we need to swizzle them.
1438       for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
1439         if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
1440           BS.cancelScheduling(VL);
1441           newTreeEntry(VL, false, false);
1442           DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
1443           return;
1444         }
1445 
1446       newTreeEntry(VL, true, false);
1447       DEBUG(dbgs() << "SLP: added a vector of stores.\n");
1448 
1449       ValueList Operands;
1450       for (Value *j : VL)
1451         Operands.push_back(cast<Instruction>(j)->getOperand(0));
1452 
1453       buildTree_rec(Operands, Depth + 1);
1454       return;
1455     }
1456     case Instruction::Call: {
1457       // Check if the calls are all to the same vectorizable intrinsic.
1458       CallInst *CI = cast<CallInst>(VL[0]);
1459       // Check if this is an Intrinsic call or something that can be
1460       // represented by an intrinsic call
1461       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
1462       if (!isTriviallyVectorizable(ID)) {
1463         BS.cancelScheduling(VL);
1464         newTreeEntry(VL, false, false);
1465         DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
1466         return;
1467       }
1468       Function *Int = CI->getCalledFunction();
1469       Value *A1I = nullptr;
1470       if (hasVectorInstrinsicScalarOpd(ID, 1))
1471         A1I = CI->getArgOperand(1);
1472       for (unsigned i = 1, e = VL.size(); i != e; ++i) {
1473         CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
1474         if (!CI2 || CI2->getCalledFunction() != Int ||
1475             getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
1476             !CI->hasIdenticalOperandBundleSchema(*CI2)) {
1477           BS.cancelScheduling(VL);
1478           newTreeEntry(VL, false, false);
1479           DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
1480                        << "\n");
1481           return;
1482         }
1483         // ctlz,cttz and powi are special intrinsics whose second argument
1484         // should be same in order for them to be vectorized.
1485         if (hasVectorInstrinsicScalarOpd(ID, 1)) {
1486           Value *A1J = CI2->getArgOperand(1);
1487           if (A1I != A1J) {
1488             BS.cancelScheduling(VL);
1489             newTreeEntry(VL, false, false);
1490             DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
1491                          << " argument "<< A1I<<"!=" << A1J
1492                          << "\n");
1493             return;
1494           }
1495         }
1496         // Verify that the bundle operands are identical between the two calls.
1497         if (CI->hasOperandBundles() &&
1498             !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
1499                         CI->op_begin() + CI->getBundleOperandsEndIndex(),
1500                         CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
1501           BS.cancelScheduling(VL);
1502           newTreeEntry(VL, false, false);
1503           DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI << "!="
1504                        << *VL[i] << '\n');
1505           return;
1506         }
1507       }
1508 
1509       newTreeEntry(VL, true, false);
1510       for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
1511         ValueList Operands;
1512         // Prepare the operand vector.
1513         for (Value *j : VL) {
1514           CallInst *CI2 = dyn_cast<CallInst>(j);
1515           Operands.push_back(CI2->getArgOperand(i));
1516         }
1517         buildTree_rec(Operands, Depth + 1);
1518       }
1519       return;
1520     }
1521     case Instruction::ShuffleVector: {
1522       // If this is not an alternate sequence of opcode like add-sub
1523       // then do not vectorize this instruction.
1524       if (!isAltShuffle) {
1525         BS.cancelScheduling(VL);
1526         newTreeEntry(VL, false, false);
1527         DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
1528         return;
1529       }
1530       newTreeEntry(VL, true, false);
1531       DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
1532 
1533       // Reorder operands if reordering would enable vectorization.
1534       if (isa<BinaryOperator>(VL0)) {
1535         ValueList Left, Right;
1536         reorderAltShuffleOperands(VL, Left, Right);
1537         buildTree_rec(Left, Depth + 1);
1538         buildTree_rec(Right, Depth + 1);
1539         return;
1540       }
1541 
1542       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1543         ValueList Operands;
1544         // Prepare the operand vector.
1545         for (Value *j : VL)
1546           Operands.push_back(cast<Instruction>(j)->getOperand(i));
1547 
1548         buildTree_rec(Operands, Depth + 1);
1549       }
1550       return;
1551     }
1552     default:
1553       BS.cancelScheduling(VL);
1554       newTreeEntry(VL, false, false);
1555       DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
1556       return;
1557   }
1558 }
1559 
1560 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
1561   unsigned N;
1562   Type *EltTy;
1563   auto *ST = dyn_cast<StructType>(T);
1564   if (ST) {
1565     N = ST->getNumElements();
1566     EltTy = *ST->element_begin();
1567   } else {
1568     N = cast<ArrayType>(T)->getNumElements();
1569     EltTy = cast<ArrayType>(T)->getElementType();
1570   }
1571   if (!isValidElementType(EltTy))
1572     return 0;
1573   uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N));
1574   if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T))
1575     return 0;
1576   if (ST) {
1577     // Check that struct is homogeneous.
1578     for (const auto *Ty : ST->elements())
1579       if (Ty != EltTy)
1580         return 0;
1581   }
1582   return N;
1583 }
1584 
1585 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const {
1586   assert(Opcode == Instruction::ExtractElement ||
1587          Opcode == Instruction::ExtractValue);
1588   assert(Opcode == getSameOpcode(VL) && "Invalid opcode");
1589   // Check if all of the extracts come from the same vector and from the
1590   // correct offset.
1591   Value *VL0 = VL[0];
1592   Instruction *E0 = cast<Instruction>(VL0);
1593   Value *Vec = E0->getOperand(0);
1594 
1595   // We have to extract from a vector/aggregate with the same number of elements.
1596   unsigned NElts;
1597   if (Opcode == Instruction::ExtractValue) {
1598     const DataLayout &DL = E0->getModule()->getDataLayout();
1599     NElts = canMapToVector(Vec->getType(), DL);
1600     if (!NElts)
1601       return false;
1602     // Check if load can be rewritten as load of vector.
1603     LoadInst *LI = dyn_cast<LoadInst>(Vec);
1604     if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
1605       return false;
1606   } else {
1607     NElts = Vec->getType()->getVectorNumElements();
1608   }
1609 
1610   if (NElts != VL.size())
1611     return false;
1612 
1613   // Check that all of the indices extract from the correct offset.
1614   if (!matchExtractIndex(E0, 0, Opcode))
1615     return false;
1616 
1617   for (unsigned i = 1, e = VL.size(); i < e; ++i) {
1618     Instruction *E = cast<Instruction>(VL[i]);
1619     if (!matchExtractIndex(E, i, Opcode))
1620       return false;
1621     if (E->getOperand(0) != Vec)
1622       return false;
1623   }
1624 
1625   return true;
1626 }
1627 
1628 int BoUpSLP::getEntryCost(TreeEntry *E) {
1629   ArrayRef<Value*> VL = E->Scalars;
1630 
1631   Type *ScalarTy = VL[0]->getType();
1632   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
1633     ScalarTy = SI->getValueOperand()->getType();
1634   VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
1635 
1636   // If we have computed a smaller type for the expression, update VecTy so
1637   // that the costs will be accurate.
1638   if (MinBWs.count(VL[0]))
1639     VecTy = VectorType::get(
1640         IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size());
1641 
1642   if (E->NeedToGather) {
1643     if (allConstant(VL))
1644       return 0;
1645     if (isSplat(VL)) {
1646       return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
1647     }
1648     return getGatherCost(E->Scalars);
1649   }
1650   unsigned Opcode = getSameOpcode(VL);
1651   assert(Opcode && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
1652   Instruction *VL0 = cast<Instruction>(VL[0]);
1653   switch (Opcode) {
1654     case Instruction::PHI: {
1655       return 0;
1656     }
1657     case Instruction::ExtractValue:
1658     case Instruction::ExtractElement: {
1659       if (canReuseExtract(VL, Opcode)) {
1660         int DeadCost = 0;
1661         for (unsigned i = 0, e = VL.size(); i < e; ++i) {
1662           Instruction *E = cast<Instruction>(VL[i]);
1663           // If all users are going to be vectorized, instruction can be
1664           // considered as dead.
1665           // The same, if have only one user, it will be vectorized for sure.
1666           if (E->hasOneUse() ||
1667               std::all_of(E->user_begin(), E->user_end(), [this](User *U) {
1668                 return ScalarToTreeEntry.count(U) > 0;
1669               }))
1670             // Take credit for instruction that will become dead.
1671             DeadCost +=
1672                 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
1673         }
1674         return -DeadCost;
1675       }
1676       return getGatherCost(VecTy);
1677     }
1678     case Instruction::ZExt:
1679     case Instruction::SExt:
1680     case Instruction::FPToUI:
1681     case Instruction::FPToSI:
1682     case Instruction::FPExt:
1683     case Instruction::PtrToInt:
1684     case Instruction::IntToPtr:
1685     case Instruction::SIToFP:
1686     case Instruction::UIToFP:
1687     case Instruction::Trunc:
1688     case Instruction::FPTrunc:
1689     case Instruction::BitCast: {
1690       Type *SrcTy = VL0->getOperand(0)->getType();
1691 
1692       // Calculate the cost of this instruction.
1693       int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
1694                                                          VL0->getType(), SrcTy);
1695 
1696       VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
1697       int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
1698       return VecCost - ScalarCost;
1699     }
1700     case Instruction::FCmp:
1701     case Instruction::ICmp:
1702     case Instruction::Select: {
1703       // Calculate the cost of this instruction.
1704       VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
1705       int ScalarCost = VecTy->getNumElements() *
1706           TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
1707       int VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
1708       return VecCost - ScalarCost;
1709     }
1710     case Instruction::Add:
1711     case Instruction::FAdd:
1712     case Instruction::Sub:
1713     case Instruction::FSub:
1714     case Instruction::Mul:
1715     case Instruction::FMul:
1716     case Instruction::UDiv:
1717     case Instruction::SDiv:
1718     case Instruction::FDiv:
1719     case Instruction::URem:
1720     case Instruction::SRem:
1721     case Instruction::FRem:
1722     case Instruction::Shl:
1723     case Instruction::LShr:
1724     case Instruction::AShr:
1725     case Instruction::And:
1726     case Instruction::Or:
1727     case Instruction::Xor: {
1728       // Certain instructions can be cheaper to vectorize if they have a
1729       // constant second vector operand.
1730       TargetTransformInfo::OperandValueKind Op1VK =
1731           TargetTransformInfo::OK_AnyValue;
1732       TargetTransformInfo::OperandValueKind Op2VK =
1733           TargetTransformInfo::OK_UniformConstantValue;
1734       TargetTransformInfo::OperandValueProperties Op1VP =
1735           TargetTransformInfo::OP_None;
1736       TargetTransformInfo::OperandValueProperties Op2VP =
1737           TargetTransformInfo::OP_None;
1738 
1739       // If all operands are exactly the same ConstantInt then set the
1740       // operand kind to OK_UniformConstantValue.
1741       // If instead not all operands are constants, then set the operand kind
1742       // to OK_AnyValue. If all operands are constants but not the same,
1743       // then set the operand kind to OK_NonUniformConstantValue.
1744       ConstantInt *CInt = nullptr;
1745       for (unsigned i = 0; i < VL.size(); ++i) {
1746         const Instruction *I = cast<Instruction>(VL[i]);
1747         if (!isa<ConstantInt>(I->getOperand(1))) {
1748           Op2VK = TargetTransformInfo::OK_AnyValue;
1749           break;
1750         }
1751         if (i == 0) {
1752           CInt = cast<ConstantInt>(I->getOperand(1));
1753           continue;
1754         }
1755         if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
1756             CInt != cast<ConstantInt>(I->getOperand(1)))
1757           Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
1758       }
1759       // FIXME: Currently cost of model modification for division by power of
1760       // 2 is handled for X86 and AArch64. Add support for other targets.
1761       if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt &&
1762           CInt->getValue().isPowerOf2())
1763         Op2VP = TargetTransformInfo::OP_PowerOf2;
1764 
1765       int ScalarCost = VecTy->getNumElements() *
1766                        TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK,
1767                                                    Op2VK, Op1VP, Op2VP);
1768       int VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK,
1769                                                 Op1VP, Op2VP);
1770       return VecCost - ScalarCost;
1771     }
1772     case Instruction::GetElementPtr: {
1773       TargetTransformInfo::OperandValueKind Op1VK =
1774           TargetTransformInfo::OK_AnyValue;
1775       TargetTransformInfo::OperandValueKind Op2VK =
1776           TargetTransformInfo::OK_UniformConstantValue;
1777 
1778       int ScalarCost =
1779           VecTy->getNumElements() *
1780           TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
1781       int VecCost =
1782           TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);
1783 
1784       return VecCost - ScalarCost;
1785     }
1786     case Instruction::Load: {
1787       // Cost of wide load - cost of scalar loads.
1788       unsigned alignment = dyn_cast<LoadInst>(VL0)->getAlignment();
1789       int ScalarLdCost = VecTy->getNumElements() *
1790             TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0);
1791       int VecLdCost = TTI->getMemoryOpCost(Instruction::Load,
1792                                            VecTy, alignment, 0);
1793       if (E->NeedToShuffle) {
1794         VecLdCost += TTI->getShuffleCost(
1795             TargetTransformInfo::SK_PermuteSingleSrc, VecTy, 0);
1796       }
1797       return VecLdCost - ScalarLdCost;
1798     }
1799     case Instruction::Store: {
1800       // We know that we can merge the stores. Calculate the cost.
1801       unsigned alignment = dyn_cast<StoreInst>(VL0)->getAlignment();
1802       int ScalarStCost = VecTy->getNumElements() *
1803             TTI->getMemoryOpCost(Instruction::Store, ScalarTy, alignment, 0);
1804       int VecStCost = TTI->getMemoryOpCost(Instruction::Store,
1805                                            VecTy, alignment, 0);
1806       return VecStCost - ScalarStCost;
1807     }
1808     case Instruction::Call: {
1809       CallInst *CI = cast<CallInst>(VL0);
1810       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
1811 
1812       // Calculate the cost of the scalar and vector calls.
1813       SmallVector<Type*, 4> ScalarTys, VecTys;
1814       for (unsigned op = 0, opc = CI->getNumArgOperands(); op!= opc; ++op) {
1815         ScalarTys.push_back(CI->getArgOperand(op)->getType());
1816         VecTys.push_back(VectorType::get(CI->getArgOperand(op)->getType(),
1817                                          VecTy->getNumElements()));
1818       }
1819 
1820       FastMathFlags FMF;
1821       if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
1822         FMF = FPMO->getFastMathFlags();
1823 
1824       int ScalarCallCost = VecTy->getNumElements() *
1825           TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF);
1826 
1827       int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys, FMF);
1828 
1829       DEBUG(dbgs() << "SLP: Call cost "<< VecCallCost - ScalarCallCost
1830             << " (" << VecCallCost  << "-" <<  ScalarCallCost << ")"
1831             << " for " << *CI << "\n");
1832 
1833       return VecCallCost - ScalarCallCost;
1834     }
1835     case Instruction::ShuffleVector: {
1836       TargetTransformInfo::OperandValueKind Op1VK =
1837           TargetTransformInfo::OK_AnyValue;
1838       TargetTransformInfo::OperandValueKind Op2VK =
1839           TargetTransformInfo::OK_AnyValue;
1840       int ScalarCost = 0;
1841       int VecCost = 0;
1842       for (Value *i : VL) {
1843         Instruction *I = cast<Instruction>(i);
1844         if (!I)
1845           break;
1846         ScalarCost +=
1847             TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
1848       }
1849       // VecCost is equal to sum of the cost of creating 2 vectors
1850       // and the cost of creating shuffle.
1851       Instruction *I0 = cast<Instruction>(VL[0]);
1852       VecCost =
1853           TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK);
1854       Instruction *I1 = cast<Instruction>(VL[1]);
1855       VecCost +=
1856           TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK);
1857       VecCost +=
1858           TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0);
1859       return VecCost - ScalarCost;
1860     }
1861     default:
1862       llvm_unreachable("Unknown instruction");
1863   }
1864 }
1865 
1866 bool BoUpSLP::isFullyVectorizableTinyTree() {
1867   DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
1868         VectorizableTree.size() << " is fully vectorizable .\n");
1869 
1870   // We only handle trees of heights 1 and 2.
1871   if (VectorizableTree.size() == 1 && !VectorizableTree[0].NeedToGather)
1872     return true;
1873 
1874   if (VectorizableTree.size() != 2)
1875     return false;
1876 
1877   // Handle splat and all-constants stores.
1878   if (!VectorizableTree[0].NeedToGather &&
1879       (allConstant(VectorizableTree[1].Scalars) ||
1880        isSplat(VectorizableTree[1].Scalars)))
1881     return true;
1882 
1883   // Gathering cost would be too much for tiny trees.
1884   if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
1885     return false;
1886 
1887   return true;
1888 }
1889 
1890 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() {
1891 
1892   // We can vectorize the tree if its size is greater than or equal to the
1893   // minimum size specified by the MinTreeSize command line option.
1894   if (VectorizableTree.size() >= MinTreeSize)
1895     return false;
1896 
1897   // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
1898   // can vectorize it if we can prove it fully vectorizable.
1899   if (isFullyVectorizableTinyTree())
1900     return false;
1901 
1902   assert(VectorizableTree.empty()
1903              ? ExternalUses.empty()
1904              : true && "We shouldn't have any external users");
1905 
1906   // Otherwise, we can't vectorize the tree. It is both tiny and not fully
1907   // vectorizable.
1908   return true;
1909 }
1910 
1911 int BoUpSLP::getSpillCost() {
1912   // Walk from the bottom of the tree to the top, tracking which values are
1913   // live. When we see a call instruction that is not part of our tree,
1914   // query TTI to see if there is a cost to keeping values live over it
1915   // (for example, if spills and fills are required).
1916   unsigned BundleWidth = VectorizableTree.front().Scalars.size();
1917   int Cost = 0;
1918 
1919   SmallPtrSet<Instruction*, 4> LiveValues;
1920   Instruction *PrevInst = nullptr;
1921 
1922   for (const auto &N : VectorizableTree) {
1923     Instruction *Inst = dyn_cast<Instruction>(N.Scalars[0]);
1924     if (!Inst)
1925       continue;
1926 
1927     if (!PrevInst) {
1928       PrevInst = Inst;
1929       continue;
1930     }
1931 
1932     // Update LiveValues.
1933     LiveValues.erase(PrevInst);
1934     for (auto &J : PrevInst->operands()) {
1935       if (isa<Instruction>(&*J) && ScalarToTreeEntry.count(&*J))
1936         LiveValues.insert(cast<Instruction>(&*J));
1937     }
1938 
1939     DEBUG(
1940       dbgs() << "SLP: #LV: " << LiveValues.size();
1941       for (auto *X : LiveValues)
1942         dbgs() << " " << X->getName();
1943       dbgs() << ", Looking at ";
1944       Inst->dump();
1945       );
1946 
1947     // Now find the sequence of instructions between PrevInst and Inst.
1948     BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
1949                                  PrevInstIt =
1950                                      PrevInst->getIterator().getReverse();
1951     while (InstIt != PrevInstIt) {
1952       if (PrevInstIt == PrevInst->getParent()->rend()) {
1953         PrevInstIt = Inst->getParent()->rbegin();
1954         continue;
1955       }
1956 
1957       if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) {
1958         SmallVector<Type*, 4> V;
1959         for (auto *II : LiveValues)
1960           V.push_back(VectorType::get(II->getType(), BundleWidth));
1961         Cost += TTI->getCostOfKeepingLiveOverCall(V);
1962       }
1963 
1964       ++PrevInstIt;
1965     }
1966 
1967     PrevInst = Inst;
1968   }
1969 
1970   return Cost;
1971 }
1972 
1973 int BoUpSLP::getTreeCost() {
1974   int Cost = 0;
1975   DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
1976         VectorizableTree.size() << ".\n");
1977 
1978   unsigned BundleWidth = VectorizableTree[0].Scalars.size();
1979 
1980   for (TreeEntry &TE : VectorizableTree) {
1981     int C = getEntryCost(&TE);
1982     DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
1983                  << *TE.Scalars[0] << ".\n");
1984     Cost += C;
1985   }
1986 
1987   SmallSet<Value *, 16> ExtractCostCalculated;
1988   int ExtractCost = 0;
1989   for (ExternalUser &EU : ExternalUses) {
1990     // We only add extract cost once for the same scalar.
1991     if (!ExtractCostCalculated.insert(EU.Scalar).second)
1992       continue;
1993 
1994     // Uses by ephemeral values are free (because the ephemeral value will be
1995     // removed prior to code generation, and so the extraction will be
1996     // removed as well).
1997     if (EphValues.count(EU.User))
1998       continue;
1999 
2000     // If we plan to rewrite the tree in a smaller type, we will need to sign
2001     // extend the extracted value back to the original type. Here, we account
2002     // for the extract and the added cost of the sign extend if needed.
2003     auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth);
2004     auto *ScalarRoot = VectorizableTree[0].Scalars[0];
2005     if (MinBWs.count(ScalarRoot)) {
2006       auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
2007       auto Extend =
2008           MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
2009       VecTy = VectorType::get(MinTy, BundleWidth);
2010       ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
2011                                                    VecTy, EU.Lane);
2012     } else {
2013       ExtractCost +=
2014           TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
2015     }
2016   }
2017 
2018   int SpillCost = getSpillCost();
2019   Cost += SpillCost + ExtractCost;
2020 
2021   DEBUG(dbgs() << "SLP: Spill Cost = " << SpillCost << ".\n"
2022                << "SLP: Extract Cost = " << ExtractCost << ".\n"
2023                << "SLP: Total Cost = " << Cost << ".\n");
2024   return Cost;
2025 }
2026 
2027 int BoUpSLP::getGatherCost(Type *Ty) {
2028   int Cost = 0;
2029   for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
2030     Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
2031   return Cost;
2032 }
2033 
2034 int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
2035   // Find the type of the operands in VL.
2036   Type *ScalarTy = VL[0]->getType();
2037   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
2038     ScalarTy = SI->getValueOperand()->getType();
2039   VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
2040   // Find the cost of inserting/extracting values from the vector.
2041   return getGatherCost(VecTy);
2042 }
2043 
2044 // Reorder commutative operations in alternate shuffle if the resulting vectors
2045 // are consecutive loads. This would allow us to vectorize the tree.
2046 // If we have something like-
2047 // load a[0] - load b[0]
2048 // load b[1] + load a[1]
2049 // load a[2] - load b[2]
2050 // load a[3] + load b[3]
2051 // Reordering the second load b[1]  load a[1] would allow us to vectorize this
2052 // code.
2053 void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL,
2054                                         SmallVectorImpl<Value *> &Left,
2055                                         SmallVectorImpl<Value *> &Right) {
2056   // Push left and right operands of binary operation into Left and Right
2057   for (Value *i : VL) {
2058     Left.push_back(cast<Instruction>(i)->getOperand(0));
2059     Right.push_back(cast<Instruction>(i)->getOperand(1));
2060   }
2061 
2062   // Reorder if we have a commutative operation and consecutive access
2063   // are on either side of the alternate instructions.
2064   for (unsigned j = 0; j < VL.size() - 1; ++j) {
2065     if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
2066       if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
2067         Instruction *VL1 = cast<Instruction>(VL[j]);
2068         Instruction *VL2 = cast<Instruction>(VL[j + 1]);
2069         if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
2070           std::swap(Left[j], Right[j]);
2071           continue;
2072         } else if (VL2->isCommutative() &&
2073                    isConsecutiveAccess(L, L1, *DL, *SE)) {
2074           std::swap(Left[j + 1], Right[j + 1]);
2075           continue;
2076         }
2077         // else unchanged
2078       }
2079     }
2080     if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
2081       if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
2082         Instruction *VL1 = cast<Instruction>(VL[j]);
2083         Instruction *VL2 = cast<Instruction>(VL[j + 1]);
2084         if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
2085           std::swap(Left[j], Right[j]);
2086           continue;
2087         } else if (VL2->isCommutative() &&
2088                    isConsecutiveAccess(L, L1, *DL, *SE)) {
2089           std::swap(Left[j + 1], Right[j + 1]);
2090           continue;
2091         }
2092         // else unchanged
2093       }
2094     }
2095   }
2096 }
2097 
2098 // Return true if I should be commuted before adding it's left and right
2099 // operands to the arrays Left and Right.
2100 //
2101 // The vectorizer is trying to either have all elements one side being
2102 // instruction with the same opcode to enable further vectorization, or having
2103 // a splat to lower the vectorizing cost.
2104 static bool shouldReorderOperands(int i, Instruction &I,
2105                                   SmallVectorImpl<Value *> &Left,
2106                                   SmallVectorImpl<Value *> &Right,
2107                                   bool AllSameOpcodeLeft,
2108                                   bool AllSameOpcodeRight, bool SplatLeft,
2109                                   bool SplatRight) {
2110   Value *VLeft = I.getOperand(0);
2111   Value *VRight = I.getOperand(1);
2112   // If we have "SplatRight", try to see if commuting is needed to preserve it.
2113   if (SplatRight) {
2114     if (VRight == Right[i - 1])
2115       // Preserve SplatRight
2116       return false;
2117     if (VLeft == Right[i - 1]) {
2118       // Commuting would preserve SplatRight, but we don't want to break
2119       // SplatLeft either, i.e. preserve the original order if possible.
2120       // (FIXME: why do we care?)
2121       if (SplatLeft && VLeft == Left[i - 1])
2122         return false;
2123       return true;
2124     }
2125   }
2126   // Symmetrically handle Right side.
2127   if (SplatLeft) {
2128     if (VLeft == Left[i - 1])
2129       // Preserve SplatLeft
2130       return false;
2131     if (VRight == Left[i - 1])
2132       return true;
2133   }
2134 
2135   Instruction *ILeft = dyn_cast<Instruction>(VLeft);
2136   Instruction *IRight = dyn_cast<Instruction>(VRight);
2137 
2138   // If we have "AllSameOpcodeRight", try to see if the left operands preserves
2139   // it and not the right, in this case we want to commute.
2140   if (AllSameOpcodeRight) {
2141     unsigned RightPrevOpcode = cast<Instruction>(Right[i - 1])->getOpcode();
2142     if (IRight && RightPrevOpcode == IRight->getOpcode())
2143       // Do not commute, a match on the right preserves AllSameOpcodeRight
2144       return false;
2145     if (ILeft && RightPrevOpcode == ILeft->getOpcode()) {
2146       // We have a match and may want to commute, but first check if there is
2147       // not also a match on the existing operands on the Left to preserve
2148       // AllSameOpcodeLeft, i.e. preserve the original order if possible.
2149       // (FIXME: why do we care?)
2150       if (AllSameOpcodeLeft && ILeft &&
2151           cast<Instruction>(Left[i - 1])->getOpcode() == ILeft->getOpcode())
2152         return false;
2153       return true;
2154     }
2155   }
2156   // Symmetrically handle Left side.
2157   if (AllSameOpcodeLeft) {
2158     unsigned LeftPrevOpcode = cast<Instruction>(Left[i - 1])->getOpcode();
2159     if (ILeft && LeftPrevOpcode == ILeft->getOpcode())
2160       return false;
2161     if (IRight && LeftPrevOpcode == IRight->getOpcode())
2162       return true;
2163   }
2164   return false;
2165 }
2166 
2167 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
2168                                              SmallVectorImpl<Value *> &Left,
2169                                              SmallVectorImpl<Value *> &Right) {
2170 
2171   if (VL.size()) {
2172     // Peel the first iteration out of the loop since there's nothing
2173     // interesting to do anyway and it simplifies the checks in the loop.
2174     auto VLeft = cast<Instruction>(VL[0])->getOperand(0);
2175     auto VRight = cast<Instruction>(VL[0])->getOperand(1);
2176     if (!isa<Instruction>(VRight) && isa<Instruction>(VLeft))
2177       // Favor having instruction to the right. FIXME: why?
2178       std::swap(VLeft, VRight);
2179     Left.push_back(VLeft);
2180     Right.push_back(VRight);
2181   }
2182 
2183   // Keep track if we have instructions with all the same opcode on one side.
2184   bool AllSameOpcodeLeft = isa<Instruction>(Left[0]);
2185   bool AllSameOpcodeRight = isa<Instruction>(Right[0]);
2186   // Keep track if we have one side with all the same value (broadcast).
2187   bool SplatLeft = true;
2188   bool SplatRight = true;
2189 
2190   for (unsigned i = 1, e = VL.size(); i != e; ++i) {
2191     Instruction *I = cast<Instruction>(VL[i]);
2192     assert(I->isCommutative() && "Can only process commutative instruction");
2193     // Commute to favor either a splat or maximizing having the same opcodes on
2194     // one side.
2195     if (shouldReorderOperands(i, *I, Left, Right, AllSameOpcodeLeft,
2196                               AllSameOpcodeRight, SplatLeft, SplatRight)) {
2197       Left.push_back(I->getOperand(1));
2198       Right.push_back(I->getOperand(0));
2199     } else {
2200       Left.push_back(I->getOperand(0));
2201       Right.push_back(I->getOperand(1));
2202     }
2203     // Update Splat* and AllSameOpcode* after the insertion.
2204     SplatRight = SplatRight && (Right[i - 1] == Right[i]);
2205     SplatLeft = SplatLeft && (Left[i - 1] == Left[i]);
2206     AllSameOpcodeLeft = AllSameOpcodeLeft && isa<Instruction>(Left[i]) &&
2207                         (cast<Instruction>(Left[i - 1])->getOpcode() ==
2208                          cast<Instruction>(Left[i])->getOpcode());
2209     AllSameOpcodeRight = AllSameOpcodeRight && isa<Instruction>(Right[i]) &&
2210                          (cast<Instruction>(Right[i - 1])->getOpcode() ==
2211                           cast<Instruction>(Right[i])->getOpcode());
2212   }
2213 
2214   // If one operand end up being broadcast, return this operand order.
2215   if (SplatRight || SplatLeft)
2216     return;
2217 
2218   // Finally check if we can get longer vectorizable chain by reordering
2219   // without breaking the good operand order detected above.
2220   // E.g. If we have something like-
2221   // load a[0]  load b[0]
2222   // load b[1]  load a[1]
2223   // load a[2]  load b[2]
2224   // load a[3]  load b[3]
2225   // Reordering the second load b[1]  load a[1] would allow us to vectorize
2226   // this code and we still retain AllSameOpcode property.
2227   // FIXME: This load reordering might break AllSameOpcode in some rare cases
2228   // such as-
2229   // add a[0],c[0]  load b[0]
2230   // add a[1],c[2]  load b[1]
2231   // b[2]           load b[2]
2232   // add a[3],c[3]  load b[3]
2233   for (unsigned j = 0; j < VL.size() - 1; ++j) {
2234     if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
2235       if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
2236         if (isConsecutiveAccess(L, L1, *DL, *SE)) {
2237           std::swap(Left[j + 1], Right[j + 1]);
2238           continue;
2239         }
2240       }
2241     }
2242     if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
2243       if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
2244         if (isConsecutiveAccess(L, L1, *DL, *SE)) {
2245           std::swap(Left[j + 1], Right[j + 1]);
2246           continue;
2247         }
2248       }
2249     }
2250     // else unchanged
2251   }
2252 }
2253 
2254 void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
2255 
2256   // Get the basic block this bundle is in. All instructions in the bundle
2257   // should be in this block.
2258   auto *Front = cast<Instruction>(VL.front());
2259   auto *BB = Front->getParent();
2260   assert(all_of(make_range(VL.begin(), VL.end()), [&](Value *V) -> bool {
2261     return cast<Instruction>(V)->getParent() == BB;
2262   }));
2263 
2264   // The last instruction in the bundle in program order.
2265   Instruction *LastInst = nullptr;
2266 
2267   // Find the last instruction. The common case should be that BB has been
2268   // scheduled, and the last instruction is VL.back(). So we start with
2269   // VL.back() and iterate over schedule data until we reach the end of the
2270   // bundle. The end of the bundle is marked by null ScheduleData.
2271   if (BlocksSchedules.count(BB)) {
2272     auto *Bundle = BlocksSchedules[BB]->getScheduleData(VL.back());
2273     if (Bundle && Bundle->isPartOfBundle())
2274       for (; Bundle; Bundle = Bundle->NextInBundle)
2275         LastInst = Bundle->Inst;
2276   }
2277 
2278   // LastInst can still be null at this point if there's either not an entry
2279   // for BB in BlocksSchedules or there's no ScheduleData available for
2280   // VL.back(). This can be the case if buildTree_rec aborts for various
2281   // reasons (e.g., the maximum recursion depth is reached, the maximum region
2282   // size is reached, etc.). ScheduleData is initialized in the scheduling
2283   // "dry-run".
2284   //
2285   // If this happens, we can still find the last instruction by brute force. We
2286   // iterate forwards from Front (inclusive) until we either see all
2287   // instructions in the bundle or reach the end of the block. If Front is the
2288   // last instruction in program order, LastInst will be set to Front, and we
2289   // will visit all the remaining instructions in the block.
2290   //
2291   // One of the reasons we exit early from buildTree_rec is to place an upper
2292   // bound on compile-time. Thus, taking an additional compile-time hit here is
2293   // not ideal. However, this should be exceedingly rare since it requires that
2294   // we both exit early from buildTree_rec and that the bundle be out-of-order
2295   // (causing us to iterate all the way to the end of the block).
2296   if (!LastInst) {
2297     SmallPtrSet<Value *, 16> Bundle(VL.begin(), VL.end());
2298     for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
2299       if (Bundle.erase(&I))
2300         LastInst = &I;
2301       if (Bundle.empty())
2302         break;
2303     }
2304   }
2305 
2306   // Set the insertion point after the last instruction in the bundle. Set the
2307   // debug location to Front.
2308   Builder.SetInsertPoint(BB, ++LastInst->getIterator());
2309   Builder.SetCurrentDebugLocation(Front->getDebugLoc());
2310 }
2311 
2312 Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
2313   Value *Vec = UndefValue::get(Ty);
2314   // Generate the 'InsertElement' instruction.
2315   for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
2316     Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
2317     if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
2318       GatherSeq.insert(Insrt);
2319       CSEBlocks.insert(Insrt->getParent());
2320 
2321       // Add to our 'need-to-extract' list.
2322       if (ScalarToTreeEntry.count(VL[i])) {
2323         int Idx = ScalarToTreeEntry[VL[i]];
2324         TreeEntry *E = &VectorizableTree[Idx];
2325         // Find which lane we need to extract.
2326         int FoundLane = -1;
2327         for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
2328           // Is this the lane of the scalar that we are looking for ?
2329           if (E->Scalars[Lane] == VL[i]) {
2330             FoundLane = Lane;
2331             break;
2332           }
2333         }
2334         assert(FoundLane >= 0 && "Could not find the correct lane");
2335         ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
2336       }
2337     }
2338   }
2339 
2340   return Vec;
2341 }
2342 
2343 Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
2344   SmallDenseMap<Value*, int>::const_iterator Entry
2345     = ScalarToTreeEntry.find(VL[0]);
2346   if (Entry != ScalarToTreeEntry.end()) {
2347     int Idx = Entry->second;
2348     const TreeEntry *En = &VectorizableTree[Idx];
2349     if (En->isSame(VL) && En->VectorizedValue)
2350       return En->VectorizedValue;
2351   }
2352   return nullptr;
2353 }
2354 
2355 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
2356   if (ScalarToTreeEntry.count(VL[0])) {
2357     int Idx = ScalarToTreeEntry[VL[0]];
2358     TreeEntry *E = &VectorizableTree[Idx];
2359     if (E->isSame(VL) || (E->NeedToShuffle && E->isFoundJumbled(VL, *DL, *SE)))
2360       return vectorizeTree(VL, E);
2361   }
2362 
2363   Type *ScalarTy = VL[0]->getType();
2364   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
2365     ScalarTy = SI->getValueOperand()->getType();
2366   VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
2367 
2368   return Gather(VL, VecTy);
2369 }
2370 
2371 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL, TreeEntry *E) {
2372   IRBuilder<>::InsertPointGuard Guard(Builder);
2373 
2374   if (E->VectorizedValue && !E->NeedToShuffle) {
2375     DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
2376     return E->VectorizedValue;
2377   }
2378 
2379   Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
2380   Type *ScalarTy = VL0->getType();
2381   if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
2382     ScalarTy = SI->getValueOperand()->getType();
2383   VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());
2384 
2385   if (E->NeedToGather) {
2386     setInsertPointAfterBundle(E->Scalars);
2387     auto *V = Gather(E->Scalars, VecTy);
2388     E->VectorizedValue = V;
2389     return V;
2390   }
2391 
2392   unsigned Opcode = getSameOpcode(E->Scalars);
2393 
2394   switch (Opcode) {
2395     case Instruction::PHI: {
2396       PHINode *PH = dyn_cast<PHINode>(VL0);
2397       Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
2398       Builder.SetCurrentDebugLocation(PH->getDebugLoc());
2399       PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
2400       E->VectorizedValue = NewPhi;
2401 
2402       // PHINodes may have multiple entries from the same block. We want to
2403       // visit every block once.
2404       SmallSet<BasicBlock*, 4> VisitedBBs;
2405 
2406       for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
2407         ValueList Operands;
2408         BasicBlock *IBB = PH->getIncomingBlock(i);
2409 
2410         if (!VisitedBBs.insert(IBB).second) {
2411           NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
2412           continue;
2413         }
2414 
2415         // Prepare the operand vector.
2416         for (Value *V : E->Scalars)
2417           Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(IBB));
2418 
2419         Builder.SetInsertPoint(IBB->getTerminator());
2420         Builder.SetCurrentDebugLocation(PH->getDebugLoc());
2421         Value *Vec = vectorizeTree(Operands);
2422         NewPhi->addIncoming(Vec, IBB);
2423       }
2424 
2425       assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
2426              "Invalid number of incoming values");
2427       return NewPhi;
2428     }
2429 
2430     case Instruction::ExtractElement: {
2431       if (canReuseExtract(E->Scalars, Instruction::ExtractElement)) {
2432         Value *V = VL0->getOperand(0);
2433         E->VectorizedValue = V;
2434         return V;
2435       }
2436       setInsertPointAfterBundle(E->Scalars);
2437       auto *V = Gather(E->Scalars, VecTy);
2438       E->VectorizedValue = V;
2439       return V;
2440     }
2441     case Instruction::ExtractValue: {
2442       if (canReuseExtract(E->Scalars, Instruction::ExtractValue)) {
2443         LoadInst *LI = cast<LoadInst>(VL0->getOperand(0));
2444         Builder.SetInsertPoint(LI);
2445         PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
2446         Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
2447         LoadInst *V = Builder.CreateAlignedLoad(Ptr, LI->getAlignment());
2448         E->VectorizedValue = V;
2449         return propagateMetadata(V, E->Scalars);
2450       }
2451       setInsertPointAfterBundle(E->Scalars);
2452       auto *V = Gather(E->Scalars, VecTy);
2453       E->VectorizedValue = V;
2454       return V;
2455     }
2456     case Instruction::ZExt:
2457     case Instruction::SExt:
2458     case Instruction::FPToUI:
2459     case Instruction::FPToSI:
2460     case Instruction::FPExt:
2461     case Instruction::PtrToInt:
2462     case Instruction::IntToPtr:
2463     case Instruction::SIToFP:
2464     case Instruction::UIToFP:
2465     case Instruction::Trunc:
2466     case Instruction::FPTrunc:
2467     case Instruction::BitCast: {
2468       ValueList INVL;
2469       for (Value *V : E->Scalars)
2470         INVL.push_back(cast<Instruction>(V)->getOperand(0));
2471 
2472       setInsertPointAfterBundle(E->Scalars);
2473 
2474       Value *InVec = vectorizeTree(INVL);
2475 
2476       if (Value *V = alreadyVectorized(E->Scalars))
2477         return V;
2478 
2479       CastInst *CI = dyn_cast<CastInst>(VL0);
2480       Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
2481       E->VectorizedValue = V;
2482       ++NumVectorInstructions;
2483       return V;
2484     }
2485     case Instruction::FCmp:
2486     case Instruction::ICmp: {
2487       ValueList LHSV, RHSV;
2488       for (Value *V : E->Scalars) {
2489         LHSV.push_back(cast<Instruction>(V)->getOperand(0));
2490         RHSV.push_back(cast<Instruction>(V)->getOperand(1));
2491       }
2492 
2493       setInsertPointAfterBundle(E->Scalars);
2494 
2495       Value *L = vectorizeTree(LHSV);
2496       Value *R = vectorizeTree(RHSV);
2497 
2498       if (Value *V = alreadyVectorized(E->Scalars))
2499         return V;
2500 
2501       CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
2502       Value *V;
2503       if (Opcode == Instruction::FCmp)
2504         V = Builder.CreateFCmp(P0, L, R);
2505       else
2506         V = Builder.CreateICmp(P0, L, R);
2507 
2508       E->VectorizedValue = V;
2509       propagateIRFlags(E->VectorizedValue, E->Scalars);
2510       ++NumVectorInstructions;
2511       return V;
2512     }
2513     case Instruction::Select: {
2514       ValueList TrueVec, FalseVec, CondVec;
2515       for (Value *V : E->Scalars) {
2516         CondVec.push_back(cast<Instruction>(V)->getOperand(0));
2517         TrueVec.push_back(cast<Instruction>(V)->getOperand(1));
2518         FalseVec.push_back(cast<Instruction>(V)->getOperand(2));
2519       }
2520 
2521       setInsertPointAfterBundle(E->Scalars);
2522 
2523       Value *Cond = vectorizeTree(CondVec);
2524       Value *True = vectorizeTree(TrueVec);
2525       Value *False = vectorizeTree(FalseVec);
2526 
2527       if (Value *V = alreadyVectorized(E->Scalars))
2528         return V;
2529 
2530       Value *V = Builder.CreateSelect(Cond, True, False);
2531       E->VectorizedValue = V;
2532       ++NumVectorInstructions;
2533       return V;
2534     }
2535     case Instruction::Add:
2536     case Instruction::FAdd:
2537     case Instruction::Sub:
2538     case Instruction::FSub:
2539     case Instruction::Mul:
2540     case Instruction::FMul:
2541     case Instruction::UDiv:
2542     case Instruction::SDiv:
2543     case Instruction::FDiv:
2544     case Instruction::URem:
2545     case Instruction::SRem:
2546     case Instruction::FRem:
2547     case Instruction::Shl:
2548     case Instruction::LShr:
2549     case Instruction::AShr:
2550     case Instruction::And:
2551     case Instruction::Or:
2552     case Instruction::Xor: {
2553       ValueList LHSVL, RHSVL;
2554       if (isa<BinaryOperator>(VL0) && VL0->isCommutative())
2555         reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL);
2556       else
2557         for (Value *V : E->Scalars) {
2558           LHSVL.push_back(cast<Instruction>(V)->getOperand(0));
2559           RHSVL.push_back(cast<Instruction>(V)->getOperand(1));
2560         }
2561 
2562       setInsertPointAfterBundle(E->Scalars);
2563 
2564       Value *LHS = vectorizeTree(LHSVL);
2565       Value *RHS = vectorizeTree(RHSVL);
2566 
2567       if (Value *V = alreadyVectorized(E->Scalars))
2568         return V;
2569 
2570       BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
2571       Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
2572       E->VectorizedValue = V;
2573       propagateIRFlags(E->VectorizedValue, E->Scalars);
2574       ++NumVectorInstructions;
2575 
2576       if (Instruction *I = dyn_cast<Instruction>(V))
2577         return propagateMetadata(I, E->Scalars);
2578 
2579       return V;
2580     }
2581     case Instruction::Load: {
2582       // Loads are inserted at the head of the tree because we don't want to
2583       // sink them all the way down past store instructions.
2584       setInsertPointAfterBundle(E->Scalars);
2585 
2586       LoadInst *LI = cast<LoadInst>(VL0);
2587       Type *ScalarLoadTy = LI->getType();
2588       unsigned AS = LI->getPointerAddressSpace();
2589 
2590       Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
2591                                             VecTy->getPointerTo(AS));
2592 
2593       // The pointer operand uses an in-tree scalar so we add the new BitCast to
2594       // ExternalUses list to make sure that an extract will be generated in the
2595       // future.
2596       if (ScalarToTreeEntry.count(LI->getPointerOperand()))
2597         ExternalUses.push_back(
2598             ExternalUser(LI->getPointerOperand(), cast<User>(VecPtr), 0));
2599 
2600       unsigned Alignment = LI->getAlignment();
2601       LI = Builder.CreateLoad(VecPtr);
2602       if (!Alignment) {
2603         Alignment = DL->getABITypeAlignment(ScalarLoadTy);
2604       }
2605       LI->setAlignment(Alignment);
2606       E->VectorizedValue = LI;
2607       ++NumVectorInstructions;
2608       propagateMetadata(LI, E->Scalars);
2609 
2610       // As program order of scalar loads are jumbled, the vectorized 'load'
2611       // must be followed by a 'shuffle' with the required jumbled mask.
2612       if (!VL.empty() && (E->NeedToShuffle)) {
2613         assert(VL.size() == E->Scalars.size() &&
2614                "Equal number of scalars expected");
2615         SmallVector<Constant *, 8> Mask;
2616         for (Value *Val : VL) {
2617           if (ScalarToTreeEntry.count(Val)) {
2618             int Idx = ScalarToTreeEntry[Val];
2619             TreeEntry *E = &VectorizableTree[Idx];
2620             for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
2621               if (E->Scalars[Lane] == Val) {
2622                 Mask.push_back(Builder.getInt32(Lane));
2623                 break;
2624               }
2625             }
2626           }
2627         }
2628 
2629         // Generate shuffle for jumbled memory access
2630         Value *Undef = UndefValue::get(VecTy);
2631         Value *Shuf = Builder.CreateShuffleVector((Value *)LI, Undef,
2632                                                   ConstantVector::get(Mask));
2633         return Shuf;
2634       }
2635 
2636       return LI;
2637     }
2638     case Instruction::Store: {
2639       StoreInst *SI = cast<StoreInst>(VL0);
2640       unsigned Alignment = SI->getAlignment();
2641       unsigned AS = SI->getPointerAddressSpace();
2642 
2643       ValueList ValueOp;
2644       for (Value *V : E->Scalars)
2645         ValueOp.push_back(cast<StoreInst>(V)->getValueOperand());
2646 
2647       setInsertPointAfterBundle(E->Scalars);
2648 
2649       Value *VecValue = vectorizeTree(ValueOp);
2650       Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
2651                                             VecTy->getPointerTo(AS));
2652       StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
2653 
2654       // The pointer operand uses an in-tree scalar so we add the new BitCast to
2655       // ExternalUses list to make sure that an extract will be generated in the
2656       // future.
2657       if (ScalarToTreeEntry.count(SI->getPointerOperand()))
2658         ExternalUses.push_back(
2659             ExternalUser(SI->getPointerOperand(), cast<User>(VecPtr), 0));
2660 
2661       if (!Alignment) {
2662         Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType());
2663       }
2664       S->setAlignment(Alignment);
2665       E->VectorizedValue = S;
2666       ++NumVectorInstructions;
2667       return propagateMetadata(S, E->Scalars);
2668     }
2669     case Instruction::GetElementPtr: {
2670       setInsertPointAfterBundle(E->Scalars);
2671 
2672       ValueList Op0VL;
2673       for (Value *V : E->Scalars)
2674         Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0));
2675 
2676       Value *Op0 = vectorizeTree(Op0VL);
2677 
2678       std::vector<Value *> OpVecs;
2679       for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
2680            ++j) {
2681         ValueList OpVL;
2682         for (Value *V : E->Scalars)
2683           OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j));
2684 
2685         Value *OpVec = vectorizeTree(OpVL);
2686         OpVecs.push_back(OpVec);
2687       }
2688 
2689       Value *V = Builder.CreateGEP(
2690           cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs);
2691       E->VectorizedValue = V;
2692       ++NumVectorInstructions;
2693 
2694       if (Instruction *I = dyn_cast<Instruction>(V))
2695         return propagateMetadata(I, E->Scalars);
2696 
2697       return V;
2698     }
2699     case Instruction::Call: {
2700       CallInst *CI = cast<CallInst>(VL0);
2701       setInsertPointAfterBundle(E->Scalars);
2702       Function *FI;
2703       Intrinsic::ID IID  = Intrinsic::not_intrinsic;
2704       Value *ScalarArg = nullptr;
2705       if (CI && (FI = CI->getCalledFunction())) {
2706         IID = FI->getIntrinsicID();
2707       }
2708       std::vector<Value *> OpVecs;
2709       for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
2710         ValueList OpVL;
2711         // ctlz,cttz and powi are special intrinsics whose second argument is
2712         // a scalar. This argument should not be vectorized.
2713         if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) {
2714           CallInst *CEI = cast<CallInst>(E->Scalars[0]);
2715           ScalarArg = CEI->getArgOperand(j);
2716           OpVecs.push_back(CEI->getArgOperand(j));
2717           continue;
2718         }
2719         for (Value *V : E->Scalars) {
2720           CallInst *CEI = cast<CallInst>(V);
2721           OpVL.push_back(CEI->getArgOperand(j));
2722         }
2723 
2724         Value *OpVec = vectorizeTree(OpVL);
2725         DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
2726         OpVecs.push_back(OpVec);
2727       }
2728 
2729       Module *M = F->getParent();
2730       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
2731       Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) };
2732       Function *CF = Intrinsic::getDeclaration(M, ID, Tys);
2733       SmallVector<OperandBundleDef, 1> OpBundles;
2734       CI->getOperandBundlesAsDefs(OpBundles);
2735       Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);
2736 
2737       // The scalar argument uses an in-tree scalar so we add the new vectorized
2738       // call to ExternalUses list to make sure that an extract will be
2739       // generated in the future.
2740       if (ScalarArg && ScalarToTreeEntry.count(ScalarArg))
2741         ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));
2742 
2743       E->VectorizedValue = V;
2744       propagateIRFlags(E->VectorizedValue, E->Scalars);
2745       ++NumVectorInstructions;
2746       return V;
2747     }
2748     case Instruction::ShuffleVector: {
2749       ValueList LHSVL, RHSVL;
2750       assert(isa<BinaryOperator>(VL0) && "Invalid Shuffle Vector Operand");
2751       reorderAltShuffleOperands(E->Scalars, LHSVL, RHSVL);
2752       setInsertPointAfterBundle(E->Scalars);
2753 
2754       Value *LHS = vectorizeTree(LHSVL);
2755       Value *RHS = vectorizeTree(RHSVL);
2756 
2757       if (Value *V = alreadyVectorized(E->Scalars))
2758         return V;
2759 
2760       // Create a vector of LHS op1 RHS
2761       BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0);
2762       Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS);
2763 
2764       // Create a vector of LHS op2 RHS
2765       Instruction *VL1 = cast<Instruction>(E->Scalars[1]);
2766       BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1);
2767       Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS);
2768 
2769       // Create shuffle to take alternate operations from the vector.
2770       // Also, gather up odd and even scalar ops to propagate IR flags to
2771       // each vector operation.
2772       ValueList OddScalars, EvenScalars;
2773       unsigned e = E->Scalars.size();
2774       SmallVector<Constant *, 8> Mask(e);
2775       for (unsigned i = 0; i < e; ++i) {
2776         if (i & 1) {
2777           Mask[i] = Builder.getInt32(e + i);
2778           OddScalars.push_back(E->Scalars[i]);
2779         } else {
2780           Mask[i] = Builder.getInt32(i);
2781           EvenScalars.push_back(E->Scalars[i]);
2782         }
2783       }
2784 
2785       Value *ShuffleMask = ConstantVector::get(Mask);
2786       propagateIRFlags(V0, EvenScalars);
2787       propagateIRFlags(V1, OddScalars);
2788 
2789       Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
2790       E->VectorizedValue = V;
2791       ++NumVectorInstructions;
2792       if (Instruction *I = dyn_cast<Instruction>(V))
2793         return propagateMetadata(I, E->Scalars);
2794 
2795       return V;
2796     }
2797     default:
2798     llvm_unreachable("unknown inst");
2799   }
2800   return nullptr;
2801 }
2802 
2803 Value *BoUpSLP::vectorizeTree() {
2804   MapVector<Value *, DebugLoc> ExternallyUsedValues;
2805   return vectorizeTree(ExternallyUsedValues);
2806 }
2807 
2808 Value *
2809 BoUpSLP::vectorizeTree(MapVector<Value *, DebugLoc> &ExternallyUsedValues) {
2810 
2811   // All blocks must be scheduled before any instructions are inserted.
2812   for (auto &BSIter : BlocksSchedules) {
2813     scheduleBlock(BSIter.second.get());
2814   }
2815 
2816   Builder.SetInsertPoint(&F->getEntryBlock().front());
2817   auto *VectorRoot = vectorizeTree(ArrayRef<Value *>(), &VectorizableTree[0]);
2818 
2819   // If the vectorized tree can be rewritten in a smaller type, we truncate the
2820   // vectorized root. InstCombine will then rewrite the entire expression. We
2821   // sign extend the extracted values below.
2822   auto *ScalarRoot = VectorizableTree[0].Scalars[0];
2823   if (MinBWs.count(ScalarRoot)) {
2824     if (auto *I = dyn_cast<Instruction>(VectorRoot))
2825       Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
2826     auto BundleWidth = VectorizableTree[0].Scalars.size();
2827     auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
2828     auto *VecTy = VectorType::get(MinTy, BundleWidth);
2829     auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
2830     VectorizableTree[0].VectorizedValue = Trunc;
2831   }
2832 
2833   DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values .\n");
2834 
2835   // If necessary, sign-extend or zero-extend ScalarRoot to the larger type
2836   // specified by ScalarType.
2837   auto extend = [&](Value *ScalarRoot, Value *Ex, Type *ScalarType) {
2838     if (!MinBWs.count(ScalarRoot))
2839       return Ex;
2840     if (MinBWs[ScalarRoot].second)
2841       return Builder.CreateSExt(Ex, ScalarType);
2842     return Builder.CreateZExt(Ex, ScalarType);
2843   };
2844 
2845   // Extract all of the elements with the external uses.
2846   for (const auto &ExternalUse : ExternalUses) {
2847     Value *Scalar = ExternalUse.Scalar;
2848     llvm::User *User = ExternalUse.User;
2849 
2850     // Skip users that we already RAUW. This happens when one instruction
2851     // has multiple uses of the same value.
2852     if (User && !is_contained(Scalar->users(), User))
2853       continue;
2854     assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");
2855 
2856     int Idx = ScalarToTreeEntry[Scalar];
2857     TreeEntry *E = &VectorizableTree[Idx];
2858     assert(!E->NeedToGather && "Extracting from a gather list");
2859 
2860     Value *Vec = E->VectorizedValue;
2861     assert(Vec && "Can't find vectorizable value");
2862 
2863     Value *Lane = Builder.getInt32(ExternalUse.Lane);
2864     // If User == nullptr, the Scalar is used as extra arg. Generate
2865     // ExtractElement instruction and update the record for this scalar in
2866     // ExternallyUsedValues.
2867     if (!User) {
2868       assert(ExternallyUsedValues.count(Scalar) &&
2869              "Scalar with nullptr as an external user must be registered in "
2870              "ExternallyUsedValues map");
2871       DebugLoc DL = ExternallyUsedValues[Scalar];
2872       if (auto *VecI = dyn_cast<Instruction>(Vec)) {
2873         Builder.SetInsertPoint(VecI->getParent(),
2874                                std::next(VecI->getIterator()));
2875       } else {
2876         Builder.SetInsertPoint(&F->getEntryBlock().front());
2877       }
2878       Value *Ex = Builder.CreateExtractElement(Vec, Lane);
2879       Ex = extend(ScalarRoot, Ex, Scalar->getType());
2880       CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
2881       ExternallyUsedValues.erase(Scalar);
2882       ExternallyUsedValues[Ex] = DL;
2883       continue;
2884     }
2885 
2886     // Generate extracts for out-of-tree users.
2887     // Find the insertion point for the extractelement lane.
2888     if (auto *VecI = dyn_cast<Instruction>(Vec)) {
2889       if (PHINode *PH = dyn_cast<PHINode>(User)) {
2890         for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
2891           if (PH->getIncomingValue(i) == Scalar) {
2892             TerminatorInst *IncomingTerminator =
2893                 PH->getIncomingBlock(i)->getTerminator();
2894             if (isa<CatchSwitchInst>(IncomingTerminator)) {
2895               Builder.SetInsertPoint(VecI->getParent(),
2896                                      std::next(VecI->getIterator()));
2897             } else {
2898               Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
2899             }
2900             Value *Ex = Builder.CreateExtractElement(Vec, Lane);
2901             Ex = extend(ScalarRoot, Ex, Scalar->getType());
2902             CSEBlocks.insert(PH->getIncomingBlock(i));
2903             PH->setOperand(i, Ex);
2904           }
2905         }
2906       } else {
2907         Builder.SetInsertPoint(cast<Instruction>(User));
2908         Value *Ex = Builder.CreateExtractElement(Vec, Lane);
2909         Ex = extend(ScalarRoot, Ex, Scalar->getType());
2910         CSEBlocks.insert(cast<Instruction>(User)->getParent());
2911         User->replaceUsesOfWith(Scalar, Ex);
2912      }
2913     } else {
2914       Builder.SetInsertPoint(&F->getEntryBlock().front());
2915       Value *Ex = Builder.CreateExtractElement(Vec, Lane);
2916       Ex = extend(ScalarRoot, Ex, Scalar->getType());
2917       CSEBlocks.insert(&F->getEntryBlock());
2918       User->replaceUsesOfWith(Scalar, Ex);
2919     }
2920 
2921     DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
2922   }
2923 
2924   // For each vectorized value:
2925   for (TreeEntry &EIdx : VectorizableTree) {
2926     TreeEntry *Entry = &EIdx;
2927 
2928     // For each lane:
2929     for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
2930       Value *Scalar = Entry->Scalars[Lane];
2931       // No need to handle users of gathered values.
2932       if (Entry->NeedToGather)
2933         continue;
2934 
2935       assert(Entry->VectorizedValue && "Can't find vectorizable value");
2936 
2937       Type *Ty = Scalar->getType();
2938       if (!Ty->isVoidTy()) {
2939 #ifndef NDEBUG
2940         for (User *U : Scalar->users()) {
2941           DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
2942 
2943           assert((ScalarToTreeEntry.count(U) ||
2944                   // It is legal to replace users in the ignorelist by undef.
2945                   is_contained(UserIgnoreList, U)) &&
2946                  "Replacing out-of-tree value with undef");
2947         }
2948 #endif
2949         Value *Undef = UndefValue::get(Ty);
2950         Scalar->replaceAllUsesWith(Undef);
2951       }
2952       DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
2953       eraseInstruction(cast<Instruction>(Scalar));
2954     }
2955   }
2956 
2957   Builder.ClearInsertionPoint();
2958 
2959   return VectorizableTree[0].VectorizedValue;
2960 }
2961 
2962 void BoUpSLP::optimizeGatherSequence() {
2963   DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
2964         << " gather sequences instructions.\n");
2965   // LICM InsertElementInst sequences.
2966   for (Instruction *it : GatherSeq) {
2967     InsertElementInst *Insert = dyn_cast<InsertElementInst>(it);
2968 
2969     if (!Insert)
2970       continue;
2971 
2972     // Check if this block is inside a loop.
2973     Loop *L = LI->getLoopFor(Insert->getParent());
2974     if (!L)
2975       continue;
2976 
2977     // Check if it has a preheader.
2978     BasicBlock *PreHeader = L->getLoopPreheader();
2979     if (!PreHeader)
2980       continue;
2981 
2982     // If the vector or the element that we insert into it are
2983     // instructions that are defined in this basic block then we can't
2984     // hoist this instruction.
2985     Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
2986     Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
2987     if (CurrVec && L->contains(CurrVec))
2988       continue;
2989     if (NewElem && L->contains(NewElem))
2990       continue;
2991 
2992     // We can hoist this instruction. Move it to the pre-header.
2993     Insert->moveBefore(PreHeader->getTerminator());
2994   }
2995 
2996   // Make a list of all reachable blocks in our CSE queue.
2997   SmallVector<const DomTreeNode *, 8> CSEWorkList;
2998   CSEWorkList.reserve(CSEBlocks.size());
2999   for (BasicBlock *BB : CSEBlocks)
3000     if (DomTreeNode *N = DT->getNode(BB)) {
3001       assert(DT->isReachableFromEntry(N));
3002       CSEWorkList.push_back(N);
3003     }
3004 
3005   // Sort blocks by domination. This ensures we visit a block after all blocks
3006   // dominating it are visited.
3007   std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(),
3008                    [this](const DomTreeNode *A, const DomTreeNode *B) {
3009     return DT->properlyDominates(A, B);
3010   });
3011 
3012   // Perform O(N^2) search over the gather sequences and merge identical
3013   // instructions. TODO: We can further optimize this scan if we split the
3014   // instructions into different buckets based on the insert lane.
3015   SmallVector<Instruction *, 16> Visited;
3016   for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
3017     assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
3018            "Worklist not sorted properly!");
3019     BasicBlock *BB = (*I)->getBlock();
3020     // For all instructions in blocks containing gather sequences:
3021     for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
3022       Instruction *In = &*it++;
3023       if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
3024         continue;
3025 
3026       // Check if we can replace this instruction with any of the
3027       // visited instructions.
3028       for (Instruction *v : Visited) {
3029         if (In->isIdenticalTo(v) &&
3030             DT->dominates(v->getParent(), In->getParent())) {
3031           In->replaceAllUsesWith(v);
3032           eraseInstruction(In);
3033           In = nullptr;
3034           break;
3035         }
3036       }
3037       if (In) {
3038         assert(!is_contained(Visited, In));
3039         Visited.push_back(In);
3040       }
3041     }
3042   }
3043   CSEBlocks.clear();
3044   GatherSeq.clear();
3045 }
3046 
3047 // Groups the instructions to a bundle (which is then a single scheduling entity)
3048 // and schedules instructions until the bundle gets ready.
3049 bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL,
3050                                                  BoUpSLP *SLP) {
3051   if (isa<PHINode>(VL[0]))
3052     return true;
3053 
3054   // Initialize the instruction bundle.
3055   Instruction *OldScheduleEnd = ScheduleEnd;
3056   ScheduleData *PrevInBundle = nullptr;
3057   ScheduleData *Bundle = nullptr;
3058   bool ReSchedule = false;
3059   DEBUG(dbgs() << "SLP:  bundle: " << *VL[0] << "\n");
3060 
3061   // Make sure that the scheduling region contains all
3062   // instructions of the bundle.
3063   for (Value *V : VL) {
3064     if (!extendSchedulingRegion(V))
3065       return false;
3066   }
3067 
3068   for (Value *V : VL) {
3069     ScheduleData *BundleMember = getScheduleData(V);
3070     assert(BundleMember &&
3071            "no ScheduleData for bundle member (maybe not in same basic block)");
3072     if (BundleMember->IsScheduled) {
3073       // A bundle member was scheduled as single instruction before and now
3074       // needs to be scheduled as part of the bundle. We just get rid of the
3075       // existing schedule.
3076       DEBUG(dbgs() << "SLP:  reset schedule because " << *BundleMember
3077                    << " was already scheduled\n");
3078       ReSchedule = true;
3079     }
3080     assert(BundleMember->isSchedulingEntity() &&
3081            "bundle member already part of other bundle");
3082     if (PrevInBundle) {
3083       PrevInBundle->NextInBundle = BundleMember;
3084     } else {
3085       Bundle = BundleMember;
3086     }
3087     BundleMember->UnscheduledDepsInBundle = 0;
3088     Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;
3089 
3090     // Group the instructions to a bundle.
3091     BundleMember->FirstInBundle = Bundle;
3092     PrevInBundle = BundleMember;
3093   }
3094   if (ScheduleEnd != OldScheduleEnd) {
3095     // The scheduling region got new instructions at the lower end (or it is a
3096     // new region for the first bundle). This makes it necessary to
3097     // recalculate all dependencies.
3098     // It is seldom that this needs to be done a second time after adding the
3099     // initial bundle to the region.
3100     for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
3101       ScheduleData *SD = getScheduleData(I);
3102       SD->clearDependencies();
3103     }
3104     ReSchedule = true;
3105   }
3106   if (ReSchedule) {
3107     resetSchedule();
3108     initialFillReadyList(ReadyInsts);
3109   }
3110 
3111   DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
3112                << BB->getName() << "\n");
3113 
3114   calculateDependencies(Bundle, true, SLP);
3115 
3116   // Now try to schedule the new bundle. As soon as the bundle is "ready" it
3117   // means that there are no cyclic dependencies and we can schedule it.
3118   // Note that's important that we don't "schedule" the bundle yet (see
3119   // cancelScheduling).
3120   while (!Bundle->isReady() && !ReadyInsts.empty()) {
3121 
3122     ScheduleData *pickedSD = ReadyInsts.back();
3123     ReadyInsts.pop_back();
3124 
3125     if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) {
3126       schedule(pickedSD, ReadyInsts);
3127     }
3128   }
3129   if (!Bundle->isReady()) {
3130     cancelScheduling(VL);
3131     return false;
3132   }
3133   return true;
3134 }
3135 
3136 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL) {
3137   if (isa<PHINode>(VL[0]))
3138     return;
3139 
3140   ScheduleData *Bundle = getScheduleData(VL[0]);
3141   DEBUG(dbgs() << "SLP:  cancel scheduling of " << *Bundle << "\n");
3142   assert(!Bundle->IsScheduled &&
3143          "Can't cancel bundle which is already scheduled");
3144   assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
3145          "tried to unbundle something which is not a bundle");
3146 
3147   // Un-bundle: make single instructions out of the bundle.
3148   ScheduleData *BundleMember = Bundle;
3149   while (BundleMember) {
3150     assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
3151     BundleMember->FirstInBundle = BundleMember;
3152     ScheduleData *Next = BundleMember->NextInBundle;
3153     BundleMember->NextInBundle = nullptr;
3154     BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
3155     if (BundleMember->UnscheduledDepsInBundle == 0) {
3156       ReadyInsts.insert(BundleMember);
3157     }
3158     BundleMember = Next;
3159   }
3160 }
3161 
3162 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V) {
3163   if (getScheduleData(V))
3164     return true;
3165   Instruction *I = dyn_cast<Instruction>(V);
3166   assert(I && "bundle member must be an instruction");
3167   assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
3168   if (!ScheduleStart) {
3169     // It's the first instruction in the new region.
3170     initScheduleData(I, I->getNextNode(), nullptr, nullptr);
3171     ScheduleStart = I;
3172     ScheduleEnd = I->getNextNode();
3173     assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
3174     DEBUG(dbgs() << "SLP:  initialize schedule region to " << *I << "\n");
3175     return true;
3176   }
3177   // Search up and down at the same time, because we don't know if the new
3178   // instruction is above or below the existing scheduling region.
3179   BasicBlock::reverse_iterator UpIter =
3180       ++ScheduleStart->getIterator().getReverse();
3181   BasicBlock::reverse_iterator UpperEnd = BB->rend();
3182   BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
3183   BasicBlock::iterator LowerEnd = BB->end();
3184   for (;;) {
3185     if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
3186       DEBUG(dbgs() << "SLP:  exceeded schedule region size limit\n");
3187       return false;
3188     }
3189 
3190     if (UpIter != UpperEnd) {
3191       if (&*UpIter == I) {
3192         initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
3193         ScheduleStart = I;
3194         DEBUG(dbgs() << "SLP:  extend schedule region start to " << *I << "\n");
3195         return true;
3196       }
3197       UpIter++;
3198     }
3199     if (DownIter != LowerEnd) {
3200       if (&*DownIter == I) {
3201         initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
3202                          nullptr);
3203         ScheduleEnd = I->getNextNode();
3204         assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
3205         DEBUG(dbgs() << "SLP:  extend schedule region end to " << *I << "\n");
3206         return true;
3207       }
3208       DownIter++;
3209     }
3210     assert((UpIter != UpperEnd || DownIter != LowerEnd) &&
3211            "instruction not found in block");
3212   }
3213   return true;
3214 }
3215 
3216 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
3217                                                 Instruction *ToI,
3218                                                 ScheduleData *PrevLoadStore,
3219                                                 ScheduleData *NextLoadStore) {
3220   ScheduleData *CurrentLoadStore = PrevLoadStore;
3221   for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
3222     ScheduleData *SD = ScheduleDataMap[I];
3223     if (!SD) {
3224       // Allocate a new ScheduleData for the instruction.
3225       if (ChunkPos >= ChunkSize) {
3226         ScheduleDataChunks.push_back(
3227             llvm::make_unique<ScheduleData[]>(ChunkSize));
3228         ChunkPos = 0;
3229       }
3230       SD = &(ScheduleDataChunks.back()[ChunkPos++]);
3231       ScheduleDataMap[I] = SD;
3232       SD->Inst = I;
3233     }
3234     assert(!isInSchedulingRegion(SD) &&
3235            "new ScheduleData already in scheduling region");
3236     SD->init(SchedulingRegionID);
3237 
3238     if (I->mayReadOrWriteMemory()) {
3239       // Update the linked list of memory accessing instructions.
3240       if (CurrentLoadStore) {
3241         CurrentLoadStore->NextLoadStore = SD;
3242       } else {
3243         FirstLoadStoreInRegion = SD;
3244       }
3245       CurrentLoadStore = SD;
3246     }
3247   }
3248   if (NextLoadStore) {
3249     if (CurrentLoadStore)
3250       CurrentLoadStore->NextLoadStore = NextLoadStore;
3251   } else {
3252     LastLoadStoreInRegion = CurrentLoadStore;
3253   }
3254 }
3255 
3256 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
3257                                                      bool InsertInReadyList,
3258                                                      BoUpSLP *SLP) {
3259   assert(SD->isSchedulingEntity());
3260 
3261   SmallVector<ScheduleData *, 10> WorkList;
3262   WorkList.push_back(SD);
3263 
3264   while (!WorkList.empty()) {
3265     ScheduleData *SD = WorkList.back();
3266     WorkList.pop_back();
3267 
3268     ScheduleData *BundleMember = SD;
3269     while (BundleMember) {
3270       assert(isInSchedulingRegion(BundleMember));
3271       if (!BundleMember->hasValidDependencies()) {
3272 
3273         DEBUG(dbgs() << "SLP:       update deps of " << *BundleMember << "\n");
3274         BundleMember->Dependencies = 0;
3275         BundleMember->resetUnscheduledDeps();
3276 
3277         // Handle def-use chain dependencies.
3278         for (User *U : BundleMember->Inst->users()) {
3279           if (isa<Instruction>(U)) {
3280             ScheduleData *UseSD = getScheduleData(U);
3281             if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
3282               BundleMember->Dependencies++;
3283               ScheduleData *DestBundle = UseSD->FirstInBundle;
3284               if (!DestBundle->IsScheduled) {
3285                 BundleMember->incrementUnscheduledDeps(1);
3286               }
3287               if (!DestBundle->hasValidDependencies()) {
3288                 WorkList.push_back(DestBundle);
3289               }
3290             }
3291           } else {
3292             // I'm not sure if this can ever happen. But we need to be safe.
3293             // This lets the instruction/bundle never be scheduled and
3294             // eventually disable vectorization.
3295             BundleMember->Dependencies++;
3296             BundleMember->incrementUnscheduledDeps(1);
3297           }
3298         }
3299 
3300         // Handle the memory dependencies.
3301         ScheduleData *DepDest = BundleMember->NextLoadStore;
3302         if (DepDest) {
3303           Instruction *SrcInst = BundleMember->Inst;
3304           MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
3305           bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
3306           unsigned numAliased = 0;
3307           unsigned DistToSrc = 1;
3308 
3309           while (DepDest) {
3310             assert(isInSchedulingRegion(DepDest));
3311 
3312             // We have two limits to reduce the complexity:
3313             // 1) AliasedCheckLimit: It's a small limit to reduce calls to
3314             //    SLP->isAliased (which is the expensive part in this loop).
3315             // 2) MaxMemDepDistance: It's for very large blocks and it aborts
3316             //    the whole loop (even if the loop is fast, it's quadratic).
3317             //    It's important for the loop break condition (see below) to
3318             //    check this limit even between two read-only instructions.
3319             if (DistToSrc >= MaxMemDepDistance ||
3320                     ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
3321                      (numAliased >= AliasedCheckLimit ||
3322                       SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
3323 
3324               // We increment the counter only if the locations are aliased
3325               // (instead of counting all alias checks). This gives a better
3326               // balance between reduced runtime and accurate dependencies.
3327               numAliased++;
3328 
3329               DepDest->MemoryDependencies.push_back(BundleMember);
3330               BundleMember->Dependencies++;
3331               ScheduleData *DestBundle = DepDest->FirstInBundle;
3332               if (!DestBundle->IsScheduled) {
3333                 BundleMember->incrementUnscheduledDeps(1);
3334               }
3335               if (!DestBundle->hasValidDependencies()) {
3336                 WorkList.push_back(DestBundle);
3337               }
3338             }
3339             DepDest = DepDest->NextLoadStore;
3340 
3341             // Example, explaining the loop break condition: Let's assume our
3342             // starting instruction is i0 and MaxMemDepDistance = 3.
3343             //
3344             //                      +--------v--v--v
3345             //             i0,i1,i2,i3,i4,i5,i6,i7,i8
3346             //             +--------^--^--^
3347             //
3348             // MaxMemDepDistance let us stop alias-checking at i3 and we add
3349             // dependencies from i0 to i3,i4,.. (even if they are not aliased).
3350             // Previously we already added dependencies from i3 to i6,i7,i8
3351             // (because of MaxMemDepDistance). As we added a dependency from
3352             // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
3353             // and we can abort this loop at i6.
3354             if (DistToSrc >= 2 * MaxMemDepDistance)
3355                 break;
3356             DistToSrc++;
3357           }
3358         }
3359       }
3360       BundleMember = BundleMember->NextInBundle;
3361     }
3362     if (InsertInReadyList && SD->isReady()) {
3363       ReadyInsts.push_back(SD);
3364       DEBUG(dbgs() << "SLP:     gets ready on update: " << *SD->Inst << "\n");
3365     }
3366   }
3367 }
3368 
3369 void BoUpSLP::BlockScheduling::resetSchedule() {
3370   assert(ScheduleStart &&
3371          "tried to reset schedule on block which has not been scheduled");
3372   for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
3373     ScheduleData *SD = getScheduleData(I);
3374     assert(isInSchedulingRegion(SD));
3375     SD->IsScheduled = false;
3376     SD->resetUnscheduledDeps();
3377   }
3378   ReadyInsts.clear();
3379 }
3380 
3381 void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
3382 
3383   if (!BS->ScheduleStart)
3384     return;
3385 
3386   DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
3387 
3388   BS->resetSchedule();
3389 
3390   // For the real scheduling we use a more sophisticated ready-list: it is
3391   // sorted by the original instruction location. This lets the final schedule
3392   // be as  close as possible to the original instruction order.
3393   struct ScheduleDataCompare {
3394     bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
3395       return SD2->SchedulingPriority < SD1->SchedulingPriority;
3396     }
3397   };
3398   std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
3399 
3400   // Ensure that all dependency data is updated and fill the ready-list with
3401   // initial instructions.
3402   int Idx = 0;
3403   int NumToSchedule = 0;
3404   for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
3405        I = I->getNextNode()) {
3406     ScheduleData *SD = BS->getScheduleData(I);
3407     assert(
3408         SD->isPartOfBundle() == (ScalarToTreeEntry.count(SD->Inst) != 0) &&
3409         "scheduler and vectorizer have different opinion on what is a bundle");
3410     SD->FirstInBundle->SchedulingPriority = Idx++;
3411     if (SD->isSchedulingEntity()) {
3412       BS->calculateDependencies(SD, false, this);
3413       NumToSchedule++;
3414     }
3415   }
3416   BS->initialFillReadyList(ReadyInsts);
3417 
3418   Instruction *LastScheduledInst = BS->ScheduleEnd;
3419 
3420   // Do the "real" scheduling.
3421   while (!ReadyInsts.empty()) {
3422     ScheduleData *picked = *ReadyInsts.begin();
3423     ReadyInsts.erase(ReadyInsts.begin());
3424 
3425     // Move the scheduled instruction(s) to their dedicated places, if not
3426     // there yet.
3427     ScheduleData *BundleMember = picked;
3428     while (BundleMember) {
3429       Instruction *pickedInst = BundleMember->Inst;
3430       if (LastScheduledInst->getNextNode() != pickedInst) {
3431         BS->BB->getInstList().remove(pickedInst);
3432         BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
3433                                      pickedInst);
3434       }
3435       LastScheduledInst = pickedInst;
3436       BundleMember = BundleMember->NextInBundle;
3437     }
3438 
3439     BS->schedule(picked, ReadyInsts);
3440     NumToSchedule--;
3441   }
3442   assert(NumToSchedule == 0 && "could not schedule all instructions");
3443 
3444   // Avoid duplicate scheduling of the block.
3445   BS->ScheduleStart = nullptr;
3446 }
3447 
3448 unsigned BoUpSLP::getVectorElementSize(Value *V) {
3449   // If V is a store, just return the width of the stored value without
3450   // traversing the expression tree. This is the common case.
3451   if (auto *Store = dyn_cast<StoreInst>(V))
3452     return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
3453 
3454   // If V is not a store, we can traverse the expression tree to find loads
3455   // that feed it. The type of the loaded value may indicate a more suitable
3456   // width than V's type. We want to base the vector element size on the width
3457   // of memory operations where possible.
3458   SmallVector<Instruction *, 16> Worklist;
3459   SmallPtrSet<Instruction *, 16> Visited;
3460   if (auto *I = dyn_cast<Instruction>(V))
3461     Worklist.push_back(I);
3462 
3463   // Traverse the expression tree in bottom-up order looking for loads. If we
3464   // encounter an instruciton we don't yet handle, we give up.
3465   auto MaxWidth = 0u;
3466   auto FoundUnknownInst = false;
3467   while (!Worklist.empty() && !FoundUnknownInst) {
3468     auto *I = Worklist.pop_back_val();
3469     Visited.insert(I);
3470 
3471     // We should only be looking at scalar instructions here. If the current
3472     // instruction has a vector type, give up.
3473     auto *Ty = I->getType();
3474     if (isa<VectorType>(Ty))
3475       FoundUnknownInst = true;
3476 
3477     // If the current instruction is a load, update MaxWidth to reflect the
3478     // width of the loaded value.
3479     else if (isa<LoadInst>(I))
3480       MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty));
3481 
3482     // Otherwise, we need to visit the operands of the instruction. We only
3483     // handle the interesting cases from buildTree here. If an operand is an
3484     // instruction we haven't yet visited, we add it to the worklist.
3485     else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
3486              isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) {
3487       for (Use &U : I->operands())
3488         if (auto *J = dyn_cast<Instruction>(U.get()))
3489           if (!Visited.count(J))
3490             Worklist.push_back(J);
3491     }
3492 
3493     // If we don't yet handle the instruction, give up.
3494     else
3495       FoundUnknownInst = true;
3496   }
3497 
3498   // If we didn't encounter a memory access in the expression tree, or if we
3499   // gave up for some reason, just return the width of V.
3500   if (!MaxWidth || FoundUnknownInst)
3501     return DL->getTypeSizeInBits(V->getType());
3502 
3503   // Otherwise, return the maximum width we found.
3504   return MaxWidth;
3505 }
3506 
3507 // Determine if a value V in a vectorizable expression Expr can be demoted to a
3508 // smaller type with a truncation. We collect the values that will be demoted
3509 // in ToDemote and additional roots that require investigating in Roots.
3510 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
3511                                   SmallVectorImpl<Value *> &ToDemote,
3512                                   SmallVectorImpl<Value *> &Roots) {
3513 
3514   // We can always demote constants.
3515   if (isa<Constant>(V)) {
3516     ToDemote.push_back(V);
3517     return true;
3518   }
3519 
3520   // If the value is not an instruction in the expression with only one use, it
3521   // cannot be demoted.
3522   auto *I = dyn_cast<Instruction>(V);
3523   if (!I || !I->hasOneUse() || !Expr.count(I))
3524     return false;
3525 
3526   switch (I->getOpcode()) {
3527 
3528   // We can always demote truncations and extensions. Since truncations can
3529   // seed additional demotion, we save the truncated value.
3530   case Instruction::Trunc:
3531     Roots.push_back(I->getOperand(0));
3532   case Instruction::ZExt:
3533   case Instruction::SExt:
3534     break;
3535 
3536   // We can demote certain binary operations if we can demote both of their
3537   // operands.
3538   case Instruction::Add:
3539   case Instruction::Sub:
3540   case Instruction::Mul:
3541   case Instruction::And:
3542   case Instruction::Or:
3543   case Instruction::Xor:
3544     if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
3545         !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
3546       return false;
3547     break;
3548 
3549   // We can demote selects if we can demote their true and false values.
3550   case Instruction::Select: {
3551     SelectInst *SI = cast<SelectInst>(I);
3552     if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
3553         !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
3554       return false;
3555     break;
3556   }
3557 
3558   // We can demote phis if we can demote all their incoming operands. Note that
3559   // we don't need to worry about cycles since we ensure single use above.
3560   case Instruction::PHI: {
3561     PHINode *PN = cast<PHINode>(I);
3562     for (Value *IncValue : PN->incoming_values())
3563       if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
3564         return false;
3565     break;
3566   }
3567 
3568   // Otherwise, conservatively give up.
3569   default:
3570     return false;
3571   }
3572 
3573   // Record the value that we can demote.
3574   ToDemote.push_back(V);
3575   return true;
3576 }
3577 
3578 void BoUpSLP::computeMinimumValueSizes() {
3579   // If there are no external uses, the expression tree must be rooted by a
3580   // store. We can't demote in-memory values, so there is nothing to do here.
3581   if (ExternalUses.empty())
3582     return;
3583 
3584   // We only attempt to truncate integer expressions.
3585   auto &TreeRoot = VectorizableTree[0].Scalars;
3586   auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
3587   if (!TreeRootIT)
3588     return;
3589 
3590   // If the expression is not rooted by a store, these roots should have
3591   // external uses. We will rely on InstCombine to rewrite the expression in
3592   // the narrower type. However, InstCombine only rewrites single-use values.
3593   // This means that if a tree entry other than a root is used externally, it
3594   // must have multiple uses and InstCombine will not rewrite it. The code
3595   // below ensures that only the roots are used externally.
3596   SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
3597   for (auto &EU : ExternalUses)
3598     if (!Expr.erase(EU.Scalar))
3599       return;
3600   if (!Expr.empty())
3601     return;
3602 
3603   // Collect the scalar values of the vectorizable expression. We will use this
3604   // context to determine which values can be demoted. If we see a truncation,
3605   // we mark it as seeding another demotion.
3606   for (auto &Entry : VectorizableTree)
3607     Expr.insert(Entry.Scalars.begin(), Entry.Scalars.end());
3608 
3609   // Ensure the roots of the vectorizable tree don't form a cycle. They must
3610   // have a single external user that is not in the vectorizable tree.
3611   for (auto *Root : TreeRoot)
3612     if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
3613       return;
3614 
3615   // Conservatively determine if we can actually truncate the roots of the
3616   // expression. Collect the values that can be demoted in ToDemote and
3617   // additional roots that require investigating in Roots.
3618   SmallVector<Value *, 32> ToDemote;
3619   SmallVector<Value *, 4> Roots;
3620   for (auto *Root : TreeRoot)
3621     if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
3622       return;
3623 
3624   // The maximum bit width required to represent all the values that can be
3625   // demoted without loss of precision. It would be safe to truncate the roots
3626   // of the expression to this width.
3627   auto MaxBitWidth = 8u;
3628 
3629   // We first check if all the bits of the roots are demanded. If they're not,
3630   // we can truncate the roots to this narrower type.
3631   for (auto *Root : TreeRoot) {
3632     auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
3633     MaxBitWidth = std::max<unsigned>(
3634         Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
3635   }
3636 
3637   // True if the roots can be zero-extended back to their original type, rather
3638   // than sign-extended. We know that if the leading bits are not demanded, we
3639   // can safely zero-extend. So we initialize IsKnownPositive to True.
3640   bool IsKnownPositive = true;
3641 
3642   // If all the bits of the roots are demanded, we can try a little harder to
3643   // compute a narrower type. This can happen, for example, if the roots are
3644   // getelementptr indices. InstCombine promotes these indices to the pointer
3645   // width. Thus, all their bits are technically demanded even though the
3646   // address computation might be vectorized in a smaller type.
3647   //
3648   // We start by looking at each entry that can be demoted. We compute the
3649   // maximum bit width required to store the scalar by using ValueTracking to
3650   // compute the number of high-order bits we can truncate.
3651   if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType())) {
3652     MaxBitWidth = 8u;
3653 
3654     // Determine if the sign bit of all the roots is known to be zero. If not,
3655     // IsKnownPositive is set to False.
3656     IsKnownPositive = all_of(TreeRoot, [&](Value *R) {
3657       bool KnownZero = false;
3658       bool KnownOne = false;
3659       ComputeSignBit(R, KnownZero, KnownOne, *DL);
3660       return KnownZero;
3661     });
3662 
3663     // Determine the maximum number of bits required to store the scalar
3664     // values.
3665     for (auto *Scalar : ToDemote) {
3666       auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, 0, DT);
3667       auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
3668       MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
3669     }
3670 
3671     // If we can't prove that the sign bit is zero, we must add one to the
3672     // maximum bit width to account for the unknown sign bit. This preserves
3673     // the existing sign bit so we can safely sign-extend the root back to the
3674     // original type. Otherwise, if we know the sign bit is zero, we will
3675     // zero-extend the root instead.
3676     //
3677     // FIXME: This is somewhat suboptimal, as there will be cases where adding
3678     //        one to the maximum bit width will yield a larger-than-necessary
3679     //        type. In general, we need to add an extra bit only if we can't
3680     //        prove that the upper bit of the original type is equal to the
3681     //        upper bit of the proposed smaller type. If these two bits are the
3682     //        same (either zero or one) we know that sign-extending from the
3683     //        smaller type will result in the same value. Here, since we can't
3684     //        yet prove this, we are just making the proposed smaller type
3685     //        larger to ensure correctness.
3686     if (!IsKnownPositive)
3687       ++MaxBitWidth;
3688   }
3689 
3690   // Round MaxBitWidth up to the next power-of-two.
3691   if (!isPowerOf2_64(MaxBitWidth))
3692     MaxBitWidth = NextPowerOf2(MaxBitWidth);
3693 
3694   // If the maximum bit width we compute is less than the with of the roots'
3695   // type, we can proceed with the narrowing. Otherwise, do nothing.
3696   if (MaxBitWidth >= TreeRootIT->getBitWidth())
3697     return;
3698 
3699   // If we can truncate the root, we must collect additional values that might
3700   // be demoted as a result. That is, those seeded by truncations we will
3701   // modify.
3702   while (!Roots.empty())
3703     collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
3704 
3705   // Finally, map the values we can demote to the maximum bit with we computed.
3706   for (auto *Scalar : ToDemote)
3707     MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
3708 }
3709 
3710 namespace {
3711 /// The SLPVectorizer Pass.
3712 struct SLPVectorizer : public FunctionPass {
3713   SLPVectorizerPass Impl;
3714 
3715   /// Pass identification, replacement for typeid
3716   static char ID;
3717 
3718   explicit SLPVectorizer() : FunctionPass(ID) {
3719     initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
3720   }
3721 
3722 
3723   bool doInitialization(Module &M) override {
3724     return false;
3725   }
3726 
3727   bool runOnFunction(Function &F) override {
3728     if (skipFunction(F))
3729       return false;
3730 
3731     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
3732     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
3733     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
3734     auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
3735     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
3736     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
3737     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
3738     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
3739     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
3740 
3741     return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB);
3742   }
3743 
3744   void getAnalysisUsage(AnalysisUsage &AU) const override {
3745     FunctionPass::getAnalysisUsage(AU);
3746     AU.addRequired<AssumptionCacheTracker>();
3747     AU.addRequired<ScalarEvolutionWrapperPass>();
3748     AU.addRequired<AAResultsWrapperPass>();
3749     AU.addRequired<TargetTransformInfoWrapperPass>();
3750     AU.addRequired<LoopInfoWrapperPass>();
3751     AU.addRequired<DominatorTreeWrapperPass>();
3752     AU.addRequired<DemandedBitsWrapperPass>();
3753     AU.addPreserved<LoopInfoWrapperPass>();
3754     AU.addPreserved<DominatorTreeWrapperPass>();
3755     AU.addPreserved<AAResultsWrapperPass>();
3756     AU.addPreserved<GlobalsAAWrapperPass>();
3757     AU.setPreservesCFG();
3758   }
3759 };
3760 } // end anonymous namespace
3761 
3762 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
3763   auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
3764   auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
3765   auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
3766   auto *AA = &AM.getResult<AAManager>(F);
3767   auto *LI = &AM.getResult<LoopAnalysis>(F);
3768   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
3769   auto *AC = &AM.getResult<AssumptionAnalysis>(F);
3770   auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
3771 
3772   bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB);
3773   if (!Changed)
3774     return PreservedAnalyses::all();
3775 
3776   PreservedAnalyses PA;
3777   PA.preserveSet<CFGAnalyses>();
3778   PA.preserve<AAManager>();
3779   PA.preserve<GlobalsAA>();
3780   return PA;
3781 }
3782 
3783 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
3784                                 TargetTransformInfo *TTI_,
3785                                 TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
3786                                 LoopInfo *LI_, DominatorTree *DT_,
3787                                 AssumptionCache *AC_, DemandedBits *DB_) {
3788   SE = SE_;
3789   TTI = TTI_;
3790   TLI = TLI_;
3791   AA = AA_;
3792   LI = LI_;
3793   DT = DT_;
3794   AC = AC_;
3795   DB = DB_;
3796   DL = &F.getParent()->getDataLayout();
3797 
3798   Stores.clear();
3799   GEPs.clear();
3800   bool Changed = false;
3801 
3802   // If the target claims to have no vector registers don't attempt
3803   // vectorization.
3804   if (!TTI->getNumberOfRegisters(true))
3805     return false;
3806 
3807   // Don't vectorize when the attribute NoImplicitFloat is used.
3808   if (F.hasFnAttribute(Attribute::NoImplicitFloat))
3809     return false;
3810 
3811   DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
3812 
3813   // Use the bottom up slp vectorizer to construct chains that start with
3814   // store instructions.
3815   BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL);
3816 
3817   // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
3818   // delete instructions.
3819 
3820   // Scan the blocks in the function in post order.
3821   for (auto BB : post_order(&F.getEntryBlock())) {
3822     collectSeedInstructions(BB);
3823 
3824     // Vectorize trees that end at stores.
3825     if (!Stores.empty()) {
3826       DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
3827                    << " underlying objects.\n");
3828       Changed |= vectorizeStoreChains(R);
3829     }
3830 
3831     // Vectorize trees that end at reductions.
3832     Changed |= vectorizeChainsInBlock(BB, R);
3833 
3834     // Vectorize the index computations of getelementptr instructions. This
3835     // is primarily intended to catch gather-like idioms ending at
3836     // non-consecutive loads.
3837     if (!GEPs.empty()) {
3838       DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
3839                    << " underlying objects.\n");
3840       Changed |= vectorizeGEPIndices(BB, R);
3841     }
3842   }
3843 
3844   if (Changed) {
3845     R.optimizeGatherSequence();
3846     DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
3847     DEBUG(verifyFunction(F));
3848   }
3849   return Changed;
3850 }
3851 
3852 /// \brief Check that the Values in the slice in VL array are still existent in
3853 /// the WeakVH array.
3854 /// Vectorization of part of the VL array may cause later values in the VL array
3855 /// to become invalid. We track when this has happened in the WeakVH array.
3856 static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, ArrayRef<WeakVH> VH,
3857                                unsigned SliceBegin, unsigned SliceSize) {
3858   VL = VL.slice(SliceBegin, SliceSize);
3859   VH = VH.slice(SliceBegin, SliceSize);
3860   return !std::equal(VL.begin(), VL.end(), VH.begin());
3861 }
3862 
3863 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
3864                                             unsigned VecRegSize) {
3865   unsigned ChainLen = Chain.size();
3866   DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
3867         << "\n");
3868   unsigned Sz = R.getVectorElementSize(Chain[0]);
3869   unsigned VF = VecRegSize / Sz;
3870 
3871   if (!isPowerOf2_32(Sz) || VF < 2)
3872     return false;
3873 
3874   // Keep track of values that were deleted by vectorizing in the loop below.
3875   SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());
3876 
3877   bool Changed = false;
3878   // Look for profitable vectorizable trees at all offsets, starting at zero.
3879   for (unsigned i = 0, e = ChainLen; i < e; ++i) {
3880     if (i + VF > e)
3881       break;
3882 
3883     // Check that a previous iteration of this loop did not delete the Value.
3884     if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
3885       continue;
3886 
3887     DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
3888           << "\n");
3889     ArrayRef<Value *> Operands = Chain.slice(i, VF);
3890 
3891     R.buildTree(Operands);
3892     if (R.isTreeTinyAndNotFullyVectorizable())
3893       continue;
3894 
3895     R.computeMinimumValueSizes();
3896 
3897     int Cost = R.getTreeCost();
3898 
3899     DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
3900     if (Cost < -SLPCostThreshold) {
3901       DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
3902       R.vectorizeTree();
3903 
3904       // Move to the next bundle.
3905       i += VF - 1;
3906       Changed = true;
3907     }
3908   }
3909 
3910   return Changed;
3911 }
3912 
3913 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
3914                                         BoUpSLP &R) {
3915   SetVector<StoreInst *> Heads, Tails;
3916   SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
3917 
3918   // We may run into multiple chains that merge into a single chain. We mark the
3919   // stores that we vectorized so that we don't visit the same store twice.
3920   BoUpSLP::ValueSet VectorizedStores;
3921   bool Changed = false;
3922 
3923   // Do a quadratic search on all of the given stores and find
3924   // all of the pairs of stores that follow each other.
3925   SmallVector<unsigned, 16> IndexQueue;
3926   for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
3927     IndexQueue.clear();
3928     // If a store has multiple consecutive store candidates, search Stores
3929     // array according to the sequence: from i+1 to e, then from i-1 to 0.
3930     // This is because usually pairing with immediate succeeding or preceding
3931     // candidate create the best chance to find slp vectorization opportunity.
3932     unsigned j = 0;
3933     for (j = i + 1; j < e; ++j)
3934       IndexQueue.push_back(j);
3935     for (j = i; j > 0; --j)
3936       IndexQueue.push_back(j - 1);
3937 
3938     for (auto &k : IndexQueue) {
3939       if (isConsecutiveAccess(Stores[i], Stores[k], *DL, *SE)) {
3940         Tails.insert(Stores[k]);
3941         Heads.insert(Stores[i]);
3942         ConsecutiveChain[Stores[i]] = Stores[k];
3943         break;
3944       }
3945     }
3946   }
3947 
3948   // For stores that start but don't end a link in the chain:
3949   for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
3950        it != e; ++it) {
3951     if (Tails.count(*it))
3952       continue;
3953 
3954     // We found a store instr that starts a chain. Now follow the chain and try
3955     // to vectorize it.
3956     BoUpSLP::ValueList Operands;
3957     StoreInst *I = *it;
3958     // Collect the chain into a list.
3959     while (Tails.count(I) || Heads.count(I)) {
3960       if (VectorizedStores.count(I))
3961         break;
3962       Operands.push_back(I);
3963       // Move to the next value in the chain.
3964       I = ConsecutiveChain[I];
3965     }
3966 
3967     // FIXME: Is division-by-2 the correct step? Should we assert that the
3968     // register size is a power-of-2?
3969     for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize();
3970          Size /= 2) {
3971       if (vectorizeStoreChain(Operands, R, Size)) {
3972         // Mark the vectorized stores so that we don't vectorize them again.
3973         VectorizedStores.insert(Operands.begin(), Operands.end());
3974         Changed = true;
3975         break;
3976       }
3977     }
3978   }
3979 
3980   return Changed;
3981 }
3982 
3983 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
3984 
3985   // Initialize the collections. We will make a single pass over the block.
3986   Stores.clear();
3987   GEPs.clear();
3988 
3989   // Visit the store and getelementptr instructions in BB and organize them in
3990   // Stores and GEPs according to the underlying objects of their pointer
3991   // operands.
3992   for (Instruction &I : *BB) {
3993 
3994     // Ignore store instructions that are volatile or have a pointer operand
3995     // that doesn't point to a scalar type.
3996     if (auto *SI = dyn_cast<StoreInst>(&I)) {
3997       if (!SI->isSimple())
3998         continue;
3999       if (!isValidElementType(SI->getValueOperand()->getType()))
4000         continue;
4001       Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
4002     }
4003 
4004     // Ignore getelementptr instructions that have more than one index, a
4005     // constant index, or a pointer operand that doesn't point to a scalar
4006     // type.
4007     else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
4008       auto Idx = GEP->idx_begin()->get();
4009       if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
4010         continue;
4011       if (!isValidElementType(Idx->getType()))
4012         continue;
4013       if (GEP->getType()->isVectorTy())
4014         continue;
4015       GEPs[GetUnderlyingObject(GEP->getPointerOperand(), *DL)].push_back(GEP);
4016     }
4017   }
4018 }
4019 
4020 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
4021   if (!A || !B)
4022     return false;
4023   Value *VL[] = { A, B };
4024   return tryToVectorizeList(VL, R, None, true);
4025 }
4026 
4027 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
4028                                            ArrayRef<Value *> BuildVector,
4029                                            bool AllowReorder) {
4030   if (VL.size() < 2)
4031     return false;
4032 
4033   DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " << VL.size()
4034                << ".\n");
4035 
4036   // Check that all of the parts are scalar instructions of the same type.
4037   Instruction *I0 = dyn_cast<Instruction>(VL[0]);
4038   if (!I0)
4039     return false;
4040 
4041   unsigned Opcode0 = I0->getOpcode();
4042 
4043   unsigned Sz = R.getVectorElementSize(I0);
4044   unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz);
4045   unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
4046   if (MaxVF < 2)
4047     return false;
4048 
4049   for (Value *V : VL) {
4050     Type *Ty = V->getType();
4051     if (!isValidElementType(Ty))
4052       return false;
4053     Instruction *Inst = dyn_cast<Instruction>(V);
4054     if (!Inst || Inst->getOpcode() != Opcode0)
4055       return false;
4056   }
4057 
4058   bool Changed = false;
4059 
4060   // Keep track of values that were deleted by vectorizing in the loop below.
4061   SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());
4062 
4063   unsigned NextInst = 0, MaxInst = VL.size();
4064   for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF;
4065        VF /= 2) {
4066     // No actual vectorization should happen, if number of parts is the same as
4067     // provided vectorization factor (i.e. the scalar type is used for vector
4068     // code during codegen).
4069     auto *VecTy = VectorType::get(VL[0]->getType(), VF);
4070     if (TTI->getNumberOfParts(VecTy) == VF)
4071       continue;
4072     for (unsigned I = NextInst; I < MaxInst; ++I) {
4073       unsigned OpsWidth = 0;
4074 
4075       if (I + VF > MaxInst)
4076         OpsWidth = MaxInst - I;
4077       else
4078         OpsWidth = VF;
4079 
4080       if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
4081         break;
4082 
4083       // Check that a previous iteration of this loop did not delete the Value.
4084       if (hasValueBeenRAUWed(VL, TrackValues, I, OpsWidth))
4085         continue;
4086 
4087       DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
4088                    << "\n");
4089       ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
4090 
4091       ArrayRef<Value *> BuildVectorSlice;
4092       if (!BuildVector.empty())
4093         BuildVectorSlice = BuildVector.slice(I, OpsWidth);
4094 
4095       R.buildTree(Ops, BuildVectorSlice);
4096       // TODO: check if we can allow reordering for more cases.
4097       if (AllowReorder && R.shouldReorder()) {
4098         // Conceptually, there is nothing actually preventing us from trying to
4099         // reorder a larger list. In fact, we do exactly this when vectorizing
4100         // reductions. However, at this point, we only expect to get here from
4101         // tryToVectorizePair().
4102         assert(Ops.size() == 2);
4103         assert(BuildVectorSlice.empty());
4104         Value *ReorderedOps[] = {Ops[1], Ops[0]};
4105         R.buildTree(ReorderedOps, None);
4106       }
4107       if (R.isTreeTinyAndNotFullyVectorizable())
4108         continue;
4109 
4110       R.computeMinimumValueSizes();
4111       int Cost = R.getTreeCost();
4112 
4113       if (Cost < -SLPCostThreshold) {
4114         DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
4115         Value *VectorizedRoot = R.vectorizeTree();
4116 
4117         // Reconstruct the build vector by extracting the vectorized root. This
4118         // way we handle the case where some elements of the vector are
4119         // undefined.
4120         //  (return (inserelt <4 xi32> (insertelt undef (opd0) 0) (opd1) 2))
4121         if (!BuildVectorSlice.empty()) {
4122           // The insert point is the last build vector instruction. The
4123           // vectorized root will precede it. This guarantees that we get an
4124           // instruction. The vectorized tree could have been constant folded.
4125           Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back());
4126           unsigned VecIdx = 0;
4127           for (auto &V : BuildVectorSlice) {
4128             IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
4129                                         ++BasicBlock::iterator(InsertAfter));
4130             Instruction *I = cast<Instruction>(V);
4131             assert(isa<InsertElementInst>(I) || isa<InsertValueInst>(I));
4132             Instruction *Extract =
4133                 cast<Instruction>(Builder.CreateExtractElement(
4134                     VectorizedRoot, Builder.getInt32(VecIdx++)));
4135             I->setOperand(1, Extract);
4136             I->removeFromParent();
4137             I->insertAfter(Extract);
4138             InsertAfter = I;
4139           }
4140         }
4141         // Move to the next bundle.
4142         I += VF - 1;
4143         NextInst = I + 1;
4144         Changed = true;
4145       }
4146     }
4147   }
4148 
4149   return Changed;
4150 }
4151 
4152 bool SLPVectorizerPass::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
4153   if (!V)
4154     return false;
4155 
4156   Value *P = V->getParent();
4157 
4158   // Vectorize in current basic block only.
4159   auto *Op0 = dyn_cast<Instruction>(V->getOperand(0));
4160   auto *Op1 = dyn_cast<Instruction>(V->getOperand(1));
4161   if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
4162     return false;
4163 
4164   // Try to vectorize V.
4165   if (tryToVectorizePair(Op0, Op1, R))
4166     return true;
4167 
4168   auto *A = dyn_cast<BinaryOperator>(Op0);
4169   auto *B = dyn_cast<BinaryOperator>(Op1);
4170   // Try to skip B.
4171   if (B && B->hasOneUse()) {
4172     auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
4173     auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
4174     if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R))
4175       return true;
4176     if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R))
4177       return true;
4178   }
4179 
4180   // Try to skip A.
4181   if (A && A->hasOneUse()) {
4182     auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
4183     auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
4184     if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R))
4185       return true;
4186     if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R))
4187       return true;
4188   }
4189   return false;
4190 }
4191 
4192 /// \brief Generate a shuffle mask to be used in a reduction tree.
4193 ///
4194 /// \param VecLen The length of the vector to be reduced.
4195 /// \param NumEltsToRdx The number of elements that should be reduced in the
4196 ///        vector.
4197 /// \param IsPairwise Whether the reduction is a pairwise or splitting
4198 ///        reduction. A pairwise reduction will generate a mask of
4199 ///        <0,2,...> or <1,3,..> while a splitting reduction will generate
4200 ///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
4201 /// \param IsLeft True will generate a mask of even elements, odd otherwise.
4202 static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
4203                                    bool IsPairwise, bool IsLeft,
4204                                    IRBuilder<> &Builder) {
4205   assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");
4206 
4207   SmallVector<Constant *, 32> ShuffleMask(
4208       VecLen, UndefValue::get(Builder.getInt32Ty()));
4209 
4210   if (IsPairwise)
4211     // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
4212     for (unsigned i = 0; i != NumEltsToRdx; ++i)
4213       ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
4214   else
4215     // Move the upper half of the vector to the lower half.
4216     for (unsigned i = 0; i != NumEltsToRdx; ++i)
4217       ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);
4218 
4219   return ConstantVector::get(ShuffleMask);
4220 }
4221 
4222 namespace {
4223 /// Model horizontal reductions.
4224 ///
4225 /// A horizontal reduction is a tree of reduction operations (currently add and
4226 /// fadd) that has operations that can be put into a vector as its leaf.
4227 /// For example, this tree:
4228 ///
4229 /// mul mul mul mul
4230 ///  \  /    \  /
4231 ///   +       +
4232 ///    \     /
4233 ///       +
4234 /// This tree has "mul" as its reduced values and "+" as its reduction
4235 /// operations. A reduction might be feeding into a store or a binary operation
4236 /// feeding a phi.
4237 ///    ...
4238 ///    \  /
4239 ///     +
4240 ///     |
4241 ///  phi +=
4242 ///
4243 ///  Or:
4244 ///    ...
4245 ///    \  /
4246 ///     +
4247 ///     |
4248 ///   *p =
4249 ///
4250 class HorizontalReduction {
4251   SmallVector<Value *, 16> ReductionOps;
4252   SmallVector<Value *, 32> ReducedVals;
4253   // Use map vector to make stable output.
4254   MapVector<Instruction *, Value *> ExtraArgs;
4255 
4256   BinaryOperator *ReductionRoot = nullptr;
4257   // After successfull horizontal reduction vectorization attempt for PHI node
4258   // vectorizer tries to update root binary op by combining vectorized tree and
4259   // the ReductionPHI node. But during vectorization this ReductionPHI can be
4260   // vectorized itself and replaced by the undef value, while the instruction
4261   // itself is marked for deletion. This 'marked for deletion' PHI node then can
4262   // be used in new binary operation, causing "Use still stuck around after Def
4263   // is destroyed" crash upon PHI node deletion.
4264   WeakVH ReductionPHI;
4265 
4266   /// The opcode of the reduction.
4267   Instruction::BinaryOps ReductionOpcode = Instruction::BinaryOpsEnd;
4268   /// The opcode of the values we perform a reduction on.
4269   unsigned ReducedValueOpcode = 0;
4270   /// Should we model this reduction as a pairwise reduction tree or a tree that
4271   /// splits the vector in halves and adds those halves.
4272   bool IsPairwiseReduction = false;
4273 
4274   /// Checks if the ParentStackElem.first should be marked as a reduction
4275   /// operation with an extra argument or as extra argument itself.
4276   void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem,
4277                     Value *ExtraArg) {
4278     if (ExtraArgs.count(ParentStackElem.first)) {
4279       ExtraArgs[ParentStackElem.first] = nullptr;
4280       // We ran into something like:
4281       // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg.
4282       // The whole ParentStackElem.first should be considered as an extra value
4283       // in this case.
4284       // Do not perform analysis of remaining operands of ParentStackElem.first
4285       // instruction, this whole instruction is an extra argument.
4286       ParentStackElem.second = ParentStackElem.first->getNumOperands();
4287     } else {
4288       // We ran into something like:
4289       // ParentStackElem.first += ... + ExtraArg + ...
4290       ExtraArgs[ParentStackElem.first] = ExtraArg;
4291     }
4292   }
4293 
4294 public:
4295   HorizontalReduction() = default;
4296 
4297   /// \brief Try to find a reduction tree.
4298   bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B) {
4299     assert((!Phi || is_contained(Phi->operands(), B)) &&
4300            "Thi phi needs to use the binary operator");
4301 
4302     // We could have a initial reductions that is not an add.
4303     //  r *= v1 + v2 + v3 + v4
4304     // In such a case start looking for a tree rooted in the first '+'.
4305     if (Phi) {
4306       if (B->getOperand(0) == Phi) {
4307         Phi = nullptr;
4308         B = dyn_cast<BinaryOperator>(B->getOperand(1));
4309       } else if (B->getOperand(1) == Phi) {
4310         Phi = nullptr;
4311         B = dyn_cast<BinaryOperator>(B->getOperand(0));
4312       }
4313     }
4314 
4315     if (!B)
4316       return false;
4317 
4318     Type *Ty = B->getType();
4319     if (!isValidElementType(Ty))
4320       return false;
4321 
4322     ReductionOpcode = B->getOpcode();
4323     ReducedValueOpcode = 0;
4324     ReductionRoot = B;
4325     ReductionPHI = Phi;
4326 
4327     // We currently only support adds.
4328     if ((ReductionOpcode != Instruction::Add &&
4329          ReductionOpcode != Instruction::FAdd) ||
4330         !B->isAssociative())
4331       return false;
4332 
4333     // Post order traverse the reduction tree starting at B. We only handle true
4334     // trees containing only binary operators or selects.
4335     SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
4336     Stack.push_back(std::make_pair(B, 0));
4337     while (!Stack.empty()) {
4338       Instruction *TreeN = Stack.back().first;
4339       unsigned EdgeToVist = Stack.back().second++;
4340       bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;
4341 
4342       // Postorder vist.
4343       if (EdgeToVist == 2 || IsReducedValue) {
4344         if (IsReducedValue)
4345           ReducedVals.push_back(TreeN);
4346         else {
4347           auto I = ExtraArgs.find(TreeN);
4348           if (I != ExtraArgs.end() && !I->second) {
4349             // Check if TreeN is an extra argument of its parent operation.
4350             if (Stack.size() <= 1) {
4351               // TreeN can't be an extra argument as it is a root reduction
4352               // operation.
4353               return false;
4354             }
4355             // Yes, TreeN is an extra argument, do not add it to a list of
4356             // reduction operations.
4357             // Stack[Stack.size() - 2] always points to the parent operation.
4358             markExtraArg(Stack[Stack.size() - 2], TreeN);
4359             ExtraArgs.erase(TreeN);
4360           } else
4361             ReductionOps.push_back(TreeN);
4362         }
4363         // Retract.
4364         Stack.pop_back();
4365         continue;
4366       }
4367 
4368       // Visit left or right.
4369       Value *NextV = TreeN->getOperand(EdgeToVist);
4370       if (NextV != Phi) {
4371         auto *I = dyn_cast<Instruction>(NextV);
4372         // Continue analysis if the next operand is a reduction operation or
4373         // (possibly) a reduced value. If the reduced value opcode is not set,
4374         // the first met operation != reduction operation is considered as the
4375         // reduced value class.
4376         if (I && (!ReducedValueOpcode || I->getOpcode() == ReducedValueOpcode ||
4377                   I->getOpcode() == ReductionOpcode)) {
4378           // Only handle trees in the current basic block.
4379           if (I->getParent() != B->getParent()) {
4380             // I is an extra argument for TreeN (its parent operation).
4381             markExtraArg(Stack.back(), I);
4382             continue;
4383           }
4384 
4385           // Each tree node needs to have one user except for the ultimate
4386           // reduction.
4387           if (!I->hasOneUse() && I != B) {
4388             // I is an extra argument for TreeN (its parent operation).
4389             markExtraArg(Stack.back(), I);
4390             continue;
4391           }
4392 
4393           if (I->getOpcode() == ReductionOpcode) {
4394             // We need to be able to reassociate the reduction operations.
4395             if (!I->isAssociative()) {
4396               // I is an extra argument for TreeN (its parent operation).
4397               markExtraArg(Stack.back(), I);
4398               continue;
4399             }
4400           } else if (ReducedValueOpcode &&
4401                      ReducedValueOpcode != I->getOpcode()) {
4402             // Make sure that the opcodes of the operations that we are going to
4403             // reduce match.
4404             // I is an extra argument for TreeN (its parent operation).
4405             markExtraArg(Stack.back(), I);
4406             continue;
4407           } else if (!ReducedValueOpcode)
4408             ReducedValueOpcode = I->getOpcode();
4409 
4410           Stack.push_back(std::make_pair(I, 0));
4411           continue;
4412         }
4413         // NextV is an extra argument for TreeN (its parent operation).
4414         markExtraArg(Stack.back(), NextV);
4415       }
4416     }
4417     return true;
4418   }
4419 
4420   /// \brief Attempt to vectorize the tree found by
4421   /// matchAssociativeReduction.
4422   bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
4423     if (ReducedVals.empty())
4424       return false;
4425 
4426     // If there is a sufficient number of reduction values, reduce
4427     // to a nearby power-of-2. Can safely generate oversized
4428     // vectors and rely on the backend to split them to legal sizes.
4429     unsigned NumReducedVals = ReducedVals.size();
4430     if (NumReducedVals < 4)
4431       return false;
4432 
4433     unsigned ReduxWidth = PowerOf2Floor(NumReducedVals);
4434 
4435     Value *VectorizedTree = nullptr;
4436     IRBuilder<> Builder(ReductionRoot);
4437     FastMathFlags Unsafe;
4438     Unsafe.setUnsafeAlgebra();
4439     Builder.setFastMathFlags(Unsafe);
4440     unsigned i = 0;
4441 
4442     MapVector<Value *, DebugLoc> ExternallyUsedValues;
4443     for (auto &Pair : ExtraArgs)
4444       ExternallyUsedValues[Pair.second] = Pair.first->getDebugLoc();
4445     while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) {
4446       auto VL = makeArrayRef(&ReducedVals[i], ReduxWidth);
4447       V.buildTree(VL, ExternallyUsedValues, ReductionOps);
4448       if (V.shouldReorder()) {
4449         SmallVector<Value *, 8> Reversed(VL.rbegin(), VL.rend());
4450         V.buildTree(Reversed, ExternallyUsedValues, ReductionOps);
4451       }
4452       if (V.isTreeTinyAndNotFullyVectorizable())
4453         break;
4454 
4455       V.computeMinimumValueSizes();
4456 
4457       // Estimate cost.
4458       int Cost =
4459           V.getTreeCost() + getReductionCost(TTI, ReducedVals[i], ReduxWidth);
4460       if (Cost >= -SLPCostThreshold)
4461         break;
4462 
4463       DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
4464                    << ". (HorRdx)\n");
4465 
4466       // Vectorize a tree.
4467       DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
4468       Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);
4469 
4470       // Emit a reduction.
4471       Value *ReducedSubTree =
4472           emitReduction(VectorizedRoot, Builder, ReduxWidth);
4473       if (VectorizedTree) {
4474         Builder.SetCurrentDebugLocation(Loc);
4475         VectorizedTree = Builder.CreateBinOp(ReductionOpcode, VectorizedTree,
4476                                              ReducedSubTree, "bin.rdx");
4477       } else
4478         VectorizedTree = ReducedSubTree;
4479       i += ReduxWidth;
4480       ReduxWidth = PowerOf2Floor(NumReducedVals - i);
4481     }
4482 
4483     if (VectorizedTree) {
4484       // Finish the reduction.
4485       for (; i < NumReducedVals; ++i) {
4486         auto *I = cast<Instruction>(ReducedVals[i]);
4487         Builder.SetCurrentDebugLocation(I->getDebugLoc());
4488         VectorizedTree =
4489             Builder.CreateBinOp(ReductionOpcode, VectorizedTree, I);
4490       }
4491       for (auto &Pair : ExternallyUsedValues) {
4492         Builder.SetCurrentDebugLocation(Pair.second);
4493         VectorizedTree = Builder.CreateBinOp(ReductionOpcode, VectorizedTree,
4494                                              Pair.first, "bin.extra");
4495       }
4496       // Update users.
4497       if (ReductionPHI && !isa<UndefValue>(ReductionPHI)) {
4498         assert(ReductionRoot && "Need a reduction operation");
4499         ReductionRoot->setOperand(0, VectorizedTree);
4500         ReductionRoot->setOperand(1, ReductionPHI);
4501       } else
4502         ReductionRoot->replaceAllUsesWith(VectorizedTree);
4503     }
4504     return VectorizedTree != nullptr;
4505   }
4506 
4507   unsigned numReductionValues() const {
4508     return ReducedVals.size();
4509   }
4510 
4511 private:
4512   /// \brief Calculate the cost of a reduction.
4513   int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal,
4514                        unsigned ReduxWidth) {
4515     Type *ScalarTy = FirstReducedVal->getType();
4516     Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);
4517 
4518     int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true);
4519     int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false);
4520 
4521     IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
4522     int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;
4523 
4524     int ScalarReduxCost =
4525         (ReduxWidth - 1) *
4526         TTI->getArithmeticInstrCost(ReductionOpcode, ScalarTy);
4527 
4528     DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
4529                  << " for reduction that starts with " << *FirstReducedVal
4530                  << " (It is a "
4531                  << (IsPairwiseReduction ? "pairwise" : "splitting")
4532                  << " reduction)\n");
4533 
4534     return VecReduxCost - ScalarReduxCost;
4535   }
4536 
4537   /// \brief Emit a horizontal reduction of the vectorized value.
4538   Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder,
4539                        unsigned ReduxWidth) {
4540     assert(VectorizedValue && "Need to have a vectorized tree node");
4541     assert(isPowerOf2_32(ReduxWidth) &&
4542            "We only handle power-of-two reductions for now");
4543 
4544     Value *TmpVec = VectorizedValue;
4545     for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
4546       if (IsPairwiseReduction) {
4547         Value *LeftMask =
4548           createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
4549         Value *RightMask =
4550           createRdxShuffleMask(ReduxWidth, i, true, false, Builder);
4551 
4552         Value *LeftShuf = Builder.CreateShuffleVector(
4553           TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l");
4554         Value *RightShuf = Builder.CreateShuffleVector(
4555           TmpVec, UndefValue::get(TmpVec->getType()), (RightMask),
4556           "rdx.shuf.r");
4557         TmpVec = Builder.CreateBinOp(ReductionOpcode, LeftShuf, RightShuf,
4558                                      "bin.rdx");
4559       } else {
4560         Value *UpperHalf =
4561           createRdxShuffleMask(ReduxWidth, i, false, false, Builder);
4562         Value *Shuf = Builder.CreateShuffleVector(
4563           TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf");
4564         TmpVec = Builder.CreateBinOp(ReductionOpcode, TmpVec, Shuf, "bin.rdx");
4565       }
4566     }
4567 
4568     // The result is in the first element of the vector.
4569     return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
4570   }
4571 };
4572 } // end anonymous namespace
4573 
4574 /// \brief Recognize construction of vectors like
4575 ///  %ra = insertelement <4 x float> undef, float %s0, i32 0
4576 ///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
4577 ///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
4578 ///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
4579 ///
4580 /// Returns true if it matches
4581 ///
4582 static bool findBuildVector(InsertElementInst *FirstInsertElem,
4583                             SmallVectorImpl<Value *> &BuildVector,
4584                             SmallVectorImpl<Value *> &BuildVectorOpds) {
4585   if (!isa<UndefValue>(FirstInsertElem->getOperand(0)))
4586     return false;
4587 
4588   InsertElementInst *IE = FirstInsertElem;
4589   while (true) {
4590     BuildVector.push_back(IE);
4591     BuildVectorOpds.push_back(IE->getOperand(1));
4592 
4593     if (IE->use_empty())
4594       return false;
4595 
4596     InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back());
4597     if (!NextUse)
4598       return true;
4599 
4600     // If this isn't the final use, make sure the next insertelement is the only
4601     // use. It's OK if the final constructed vector is used multiple times
4602     if (!IE->hasOneUse())
4603       return false;
4604 
4605     IE = NextUse;
4606   }
4607 
4608   return false;
4609 }
4610 
4611 /// \brief Like findBuildVector, but looks backwards for construction of aggregate.
4612 ///
4613 /// \return true if it matches.
4614 static bool findBuildAggregate(InsertValueInst *IV,
4615                                SmallVectorImpl<Value *> &BuildVector,
4616                                SmallVectorImpl<Value *> &BuildVectorOpds) {
4617   Value *V = nullptr;
4618   do {
4619     BuildVector.push_back(IV);
4620     BuildVectorOpds.push_back(IV->getInsertedValueOperand());
4621     V = IV->getAggregateOperand();
4622     if (isa<UndefValue>(V))
4623       break;
4624     IV = dyn_cast<InsertValueInst>(V);
4625     if (!IV || !IV->hasOneUse())
4626       return false;
4627   } while (true);
4628   std::reverse(BuildVector.begin(), BuildVector.end());
4629   std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end());
4630   return true;
4631 }
4632 
4633 static bool PhiTypeSorterFunc(Value *V, Value *V2) {
4634   return V->getType() < V2->getType();
4635 }
4636 
4637 /// \brief Try and get a reduction value from a phi node.
4638 ///
4639 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions
4640 /// if they come from either \p ParentBB or a containing loop latch.
4641 ///
4642 /// \returns A candidate reduction value if possible, or \code nullptr \endcode
4643 /// if not possible.
4644 static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
4645                                 BasicBlock *ParentBB, LoopInfo *LI) {
4646   // There are situations where the reduction value is not dominated by the
4647   // reduction phi. Vectorizing such cases has been reported to cause
4648   // miscompiles. See PR25787.
4649   auto DominatedReduxValue = [&](Value *R) {
4650     return (
4651         dyn_cast<Instruction>(R) &&
4652         DT->dominates(P->getParent(), dyn_cast<Instruction>(R)->getParent()));
4653   };
4654 
4655   Value *Rdx = nullptr;
4656 
4657   // Return the incoming value if it comes from the same BB as the phi node.
4658   if (P->getIncomingBlock(0) == ParentBB) {
4659     Rdx = P->getIncomingValue(0);
4660   } else if (P->getIncomingBlock(1) == ParentBB) {
4661     Rdx = P->getIncomingValue(1);
4662   }
4663 
4664   if (Rdx && DominatedReduxValue(Rdx))
4665     return Rdx;
4666 
4667   // Otherwise, check whether we have a loop latch to look at.
4668   Loop *BBL = LI->getLoopFor(ParentBB);
4669   if (!BBL)
4670     return nullptr;
4671   BasicBlock *BBLatch = BBL->getLoopLatch();
4672   if (!BBLatch)
4673     return nullptr;
4674 
4675   // There is a loop latch, return the incoming value if it comes from
4676   // that. This reduction pattern occasionally turns up.
4677   if (P->getIncomingBlock(0) == BBLatch) {
4678     Rdx = P->getIncomingValue(0);
4679   } else if (P->getIncomingBlock(1) == BBLatch) {
4680     Rdx = P->getIncomingValue(1);
4681   }
4682 
4683   if (Rdx && DominatedReduxValue(Rdx))
4684     return Rdx;
4685 
4686   return nullptr;
4687 }
4688 
4689 namespace {
4690 /// Tracks instructons and its children.
4691 class WeakVHWithLevel final : public CallbackVH {
4692   /// Operand index of the instruction currently beeing analized.
4693   unsigned Level = 0;
4694   /// Is this the instruction that should be vectorized, or are we now
4695   /// processing children (i.e. operands of this instruction) for potential
4696   /// vectorization?
4697   bool IsInitial = true;
4698 
4699 public:
4700   explicit WeakVHWithLevel() = default;
4701   WeakVHWithLevel(Value *V) : CallbackVH(V){};
4702   /// Restart children analysis each time it is repaced by the new instruction.
4703   void allUsesReplacedWith(Value *New) override {
4704     setValPtr(New);
4705     Level = 0;
4706     IsInitial = true;
4707   }
4708   /// Check if the instruction was not deleted during vectorization.
4709   bool isValid() const { return !getValPtr(); }
4710   /// Is the istruction itself must be vectorized?
4711   bool isInitial() const { return IsInitial; }
4712   /// Try to vectorize children.
4713   void clearInitial() { IsInitial = false; }
4714   /// Are all children processed already?
4715   bool isFinal() const {
4716     assert(getValPtr() &&
4717            (isa<Instruction>(getValPtr()) &&
4718             cast<Instruction>(getValPtr())->getNumOperands() >= Level));
4719     return getValPtr() &&
4720            cast<Instruction>(getValPtr())->getNumOperands() == Level;
4721   }
4722   /// Get next child operation.
4723   Value *nextOperand() {
4724     assert(getValPtr() && isa<Instruction>(getValPtr()) &&
4725            cast<Instruction>(getValPtr())->getNumOperands() > Level);
4726     return cast<Instruction>(getValPtr())->getOperand(Level++);
4727   }
4728   virtual ~WeakVHWithLevel() = default;
4729 };
4730 } // namespace
4731 
4732 /// \brief Attempt to reduce a horizontal reduction.
4733 /// If it is legal to match a horizontal reduction feeding
4734 /// the phi node P with reduction operators Root in a basic block BB, then check
4735 /// if it can be done.
4736 /// \returns true if a horizontal reduction was matched and reduced.
4737 /// \returns false if a horizontal reduction was not matched.
4738 static bool canBeVectorized(
4739     PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
4740     TargetTransformInfo *TTI,
4741     const function_ref<bool(BinaryOperator *, BoUpSLP &)> Vectorize) {
4742   if (!ShouldVectorizeHor)
4743     return false;
4744 
4745   if (!Root)
4746     return false;
4747 
4748   if (Root->getParent() != BB)
4749     return false;
4750   SmallVector<WeakVHWithLevel, 8> Stack(1, Root);
4751   SmallSet<Value *, 8> VisitedInstrs;
4752   bool Res = false;
4753   while (!Stack.empty()) {
4754     Value *V = Stack.back();
4755     if (!V) {
4756       Stack.pop_back();
4757       continue;
4758     }
4759     auto *Inst = dyn_cast<Instruction>(V);
4760     if (!Inst || isa<PHINode>(Inst)) {
4761       Stack.pop_back();
4762       continue;
4763     }
4764     if (Stack.back().isInitial()) {
4765       Stack.back().clearInitial();
4766       if (auto *BI = dyn_cast<BinaryOperator>(Inst)) {
4767         HorizontalReduction HorRdx;
4768         if (HorRdx.matchAssociativeReduction(P, BI)) {
4769           if (HorRdx.tryToReduce(R, TTI)) {
4770             Res = true;
4771             P = nullptr;
4772             continue;
4773           }
4774         }
4775         if (P) {
4776           Inst = dyn_cast<Instruction>(BI->getOperand(0));
4777           if (Inst == P)
4778             Inst = dyn_cast<Instruction>(BI->getOperand(1));
4779           if (!Inst) {
4780             P = nullptr;
4781             continue;
4782           }
4783         }
4784       }
4785       P = nullptr;
4786       if (Vectorize(dyn_cast<BinaryOperator>(Inst), R)) {
4787         Res = true;
4788         continue;
4789       }
4790     }
4791     if (Stack.back().isFinal()) {
4792       Stack.pop_back();
4793       continue;
4794     }
4795 
4796     if (auto *NextV = dyn_cast<Instruction>(Stack.back().nextOperand()))
4797       if (NextV->getParent() == BB && VisitedInstrs.insert(NextV).second &&
4798           Stack.size() < RecursionMaxDepth)
4799         Stack.push_back(NextV);
4800   }
4801   return Res;
4802 }
4803 
4804 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
4805                                                  BasicBlock *BB, BoUpSLP &R,
4806                                                  TargetTransformInfo *TTI) {
4807   if (!V)
4808     return false;
4809   auto *I = dyn_cast<Instruction>(V);
4810   if (!I)
4811     return false;
4812 
4813   if (!isa<BinaryOperator>(I))
4814     P = nullptr;
4815   // Try to match and vectorize a horizontal reduction.
4816   return canBeVectorized(P, I, BB, R, TTI,
4817                          [this](BinaryOperator *BI, BoUpSLP &R) -> bool {
4818                            return tryToVectorize(BI, R);
4819                          });
4820 }
4821 
4822 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
4823   bool Changed = false;
4824   SmallVector<Value *, 4> Incoming;
4825   SmallSet<Value *, 16> VisitedInstrs;
4826 
4827   bool HaveVectorizedPhiNodes = true;
4828   while (HaveVectorizedPhiNodes) {
4829     HaveVectorizedPhiNodes = false;
4830 
4831     // Collect the incoming values from the PHIs.
4832     Incoming.clear();
4833     for (Instruction &I : *BB) {
4834       PHINode *P = dyn_cast<PHINode>(&I);
4835       if (!P)
4836         break;
4837 
4838       if (!VisitedInstrs.count(P))
4839         Incoming.push_back(P);
4840     }
4841 
4842     // Sort by type.
4843     std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);
4844 
4845     // Try to vectorize elements base on their type.
4846     for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
4847                                            E = Incoming.end();
4848          IncIt != E;) {
4849 
4850       // Look for the next elements with the same type.
4851       SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
4852       while (SameTypeIt != E &&
4853              (*SameTypeIt)->getType() == (*IncIt)->getType()) {
4854         VisitedInstrs.insert(*SameTypeIt);
4855         ++SameTypeIt;
4856       }
4857 
4858       // Try to vectorize them.
4859       unsigned NumElts = (SameTypeIt - IncIt);
4860       DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts << ")\n");
4861       if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R)) {
4862         // Success start over because instructions might have been changed.
4863         HaveVectorizedPhiNodes = true;
4864         Changed = true;
4865         break;
4866       }
4867 
4868       // Start over at the next instruction of a different type (or the end).
4869       IncIt = SameTypeIt;
4870     }
4871   }
4872 
4873   VisitedInstrs.clear();
4874 
4875   for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
4876     // We may go through BB multiple times so skip the one we have checked.
4877     if (!VisitedInstrs.insert(&*it).second)
4878       continue;
4879 
4880     if (isa<DbgInfoIntrinsic>(it))
4881       continue;
4882 
4883     // Try to vectorize reductions that use PHINodes.
4884     if (PHINode *P = dyn_cast<PHINode>(it)) {
4885       // Check that the PHI is a reduction PHI.
4886       if (P->getNumIncomingValues() != 2)
4887         return Changed;
4888 
4889       // Try to match and vectorize a horizontal reduction.
4890       if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
4891                                    TTI)) {
4892         Changed = true;
4893         it = BB->begin();
4894         e = BB->end();
4895         continue;
4896       }
4897       continue;
4898     }
4899 
4900     if (ShouldStartVectorizeHorAtStore) {
4901       if (StoreInst *SI = dyn_cast<StoreInst>(it)) {
4902         // Try to match and vectorize a horizontal reduction.
4903         if (vectorizeRootInstruction(nullptr, SI->getValueOperand(), BB, R,
4904                                      TTI)) {
4905           Changed = true;
4906           it = BB->begin();
4907           e = BB->end();
4908           continue;
4909         }
4910       }
4911     }
4912 
4913     // Try to vectorize horizontal reductions feeding into a return.
4914     if (ReturnInst *RI = dyn_cast<ReturnInst>(it)) {
4915       if (RI->getNumOperands() != 0) {
4916         // Try to match and vectorize a horizontal reduction.
4917         if (vectorizeRootInstruction(nullptr, RI->getOperand(0), BB, R, TTI)) {
4918           Changed = true;
4919           it = BB->begin();
4920           e = BB->end();
4921           continue;
4922         }
4923       }
4924     }
4925 
4926     // Try to vectorize trees that start at compare instructions.
4927     if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
4928       if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
4929         Changed = true;
4930         // We would like to start over since some instructions are deleted
4931         // and the iterator may become invalid value.
4932         it = BB->begin();
4933         e = BB->end();
4934         continue;
4935       }
4936 
4937       for (int I = 0; I < 2; ++I) {
4938         if (vectorizeRootInstruction(nullptr, CI->getOperand(I), BB, R, TTI)) {
4939           Changed = true;
4940           // We would like to start over since some instructions are deleted
4941           // and the iterator may become invalid value.
4942           it = BB->begin();
4943           e = BB->end();
4944           break;
4945         }
4946       }
4947       continue;
4948     }
4949 
4950     // Try to vectorize trees that start at insertelement instructions.
4951     if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) {
4952       SmallVector<Value *, 16> BuildVector;
4953       SmallVector<Value *, 16> BuildVectorOpds;
4954       if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds))
4955         continue;
4956 
4957       // Vectorize starting with the build vector operands ignoring the
4958       // BuildVector instructions for the purpose of scheduling and user
4959       // extraction.
4960       if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) {
4961         Changed = true;
4962         it = BB->begin();
4963         e = BB->end();
4964       }
4965 
4966       continue;
4967     }
4968 
4969     // Try to vectorize trees that start at insertvalue instructions feeding into
4970     // a store.
4971     if (StoreInst *SI = dyn_cast<StoreInst>(it)) {
4972       if (InsertValueInst *LastInsertValue = dyn_cast<InsertValueInst>(SI->getValueOperand())) {
4973         const DataLayout &DL = BB->getModule()->getDataLayout();
4974         if (R.canMapToVector(SI->getValueOperand()->getType(), DL)) {
4975           SmallVector<Value *, 16> BuildVector;
4976           SmallVector<Value *, 16> BuildVectorOpds;
4977           if (!findBuildAggregate(LastInsertValue, BuildVector, BuildVectorOpds))
4978             continue;
4979 
4980           DEBUG(dbgs() << "SLP: store of array mappable to vector: " << *SI << "\n");
4981           if (tryToVectorizeList(BuildVectorOpds, R, BuildVector, false)) {
4982             Changed = true;
4983             it = BB->begin();
4984             e = BB->end();
4985           }
4986           continue;
4987         }
4988       }
4989     }
4990   }
4991 
4992   return Changed;
4993 }
4994 
4995 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
4996   auto Changed = false;
4997   for (auto &Entry : GEPs) {
4998 
4999     // If the getelementptr list has fewer than two elements, there's nothing
5000     // to do.
5001     if (Entry.second.size() < 2)
5002       continue;
5003 
5004     DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
5005                  << Entry.second.size() << ".\n");
5006 
5007     // We process the getelementptr list in chunks of 16 (like we do for
5008     // stores) to minimize compile-time.
5009     for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += 16) {
5010       auto Len = std::min<unsigned>(BE - BI, 16);
5011       auto GEPList = makeArrayRef(&Entry.second[BI], Len);
5012 
5013       // Initialize a set a candidate getelementptrs. Note that we use a
5014       // SetVector here to preserve program order. If the index computations
5015       // are vectorizable and begin with loads, we want to minimize the chance
5016       // of having to reorder them later.
5017       SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());
5018 
5019       // Some of the candidates may have already been vectorized after we
5020       // initially collected them. If so, the WeakVHs will have nullified the
5021       // values, so remove them from the set of candidates.
5022       Candidates.remove(nullptr);
5023 
5024       // Remove from the set of candidates all pairs of getelementptrs with
5025       // constant differences. Such getelementptrs are likely not good
5026       // candidates for vectorization in a bottom-up phase since one can be
5027       // computed from the other. We also ensure all candidate getelementptr
5028       // indices are unique.
5029       for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
5030         auto *GEPI = cast<GetElementPtrInst>(GEPList[I]);
5031         if (!Candidates.count(GEPI))
5032           continue;
5033         auto *SCEVI = SE->getSCEV(GEPList[I]);
5034         for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
5035           auto *GEPJ = cast<GetElementPtrInst>(GEPList[J]);
5036           auto *SCEVJ = SE->getSCEV(GEPList[J]);
5037           if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
5038             Candidates.remove(GEPList[I]);
5039             Candidates.remove(GEPList[J]);
5040           } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
5041             Candidates.remove(GEPList[J]);
5042           }
5043         }
5044       }
5045 
5046       // We break out of the above computation as soon as we know there are
5047       // fewer than two candidates remaining.
5048       if (Candidates.size() < 2)
5049         continue;
5050 
5051       // Add the single, non-constant index of each candidate to the bundle. We
5052       // ensured the indices met these constraints when we originally collected
5053       // the getelementptrs.
5054       SmallVector<Value *, 16> Bundle(Candidates.size());
5055       auto BundleIndex = 0u;
5056       for (auto *V : Candidates) {
5057         auto *GEP = cast<GetElementPtrInst>(V);
5058         auto *GEPIdx = GEP->idx_begin()->get();
5059         assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
5060         Bundle[BundleIndex++] = GEPIdx;
5061       }
5062 
5063       // Try and vectorize the indices. We are currently only interested in
5064       // gather-like cases of the form:
5065       //
5066       // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
5067       //
5068       // where the loads of "a", the loads of "b", and the subtractions can be
5069       // performed in parallel. It's likely that detecting this pattern in a
5070       // bottom-up phase will be simpler and less costly than building a
5071       // full-blown top-down phase beginning at the consecutive loads.
5072       Changed |= tryToVectorizeList(Bundle, R);
5073     }
5074   }
5075   return Changed;
5076 }
5077 
5078 bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
5079   bool Changed = false;
5080   // Attempt to sort and vectorize each of the store-groups.
5081   for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e;
5082        ++it) {
5083     if (it->second.size() < 2)
5084       continue;
5085 
5086     DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
5087           << it->second.size() << ".\n");
5088 
5089     // Process the stores in chunks of 16.
5090     // TODO: The limit of 16 inhibits greater vectorization factors.
5091     //       For example, AVX2 supports v32i8. Increasing this limit, however,
5092     //       may cause a significant compile-time increase.
5093     for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI+=16) {
5094       unsigned Len = std::min<unsigned>(CE - CI, 16);
5095       Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len), R);
5096     }
5097   }
5098   return Changed;
5099 }
5100 
5101 char SLPVectorizer::ID = 0;
5102 static const char lv_name[] = "SLP Vectorizer";
5103 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
5104 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
5105 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
5106 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
5107 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
5108 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
5109 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
5110 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)
5111 
5112 namespace llvm {
5113 Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
5114 }
5115