1 //===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 // This pass implements the Bottom Up SLP vectorizer. It detects consecutive
10 // stores that can be put together into vector-stores. Next, it attempts to
11 // construct vectorizable tree using the use-def chains. If a profitable tree
12 // was found, the SLP vectorizer performs vectorization on the tree.
13 //
14 // The pass is inspired by the work described in the paper:
15 //  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
16 //
17 //===----------------------------------------------------------------------===//
18 #include "llvm/Transforms/Vectorize/SLPVectorizer.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/PostOrderIterator.h"
21 #include "llvm/ADT/SetVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/CodeMetrics.h"
24 #include "llvm/Analysis/GlobalsModRef.h"
25 #include "llvm/Analysis/LoopAccessAnalysis.h"
26 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/Analysis/VectorUtils.h"
29 #include "llvm/IR/DataLayout.h"
30 #include "llvm/IR/Dominators.h"
31 #include "llvm/IR/IRBuilder.h"
32 #include "llvm/IR/Instructions.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/IR/Module.h"
35 #include "llvm/IR/NoFolder.h"
36 #include "llvm/IR/Type.h"
37 #include "llvm/IR/Value.h"
38 #include "llvm/IR/Verifier.h"
39 #include "llvm/Pass.h"
40 #include "llvm/Support/CommandLine.h"
41 #include "llvm/Support/Debug.h"
42 #include "llvm/Support/raw_ostream.h"
43 #include "llvm/Transforms/Vectorize.h"
44 #include <algorithm>
45 #include <memory>
46 
47 using namespace llvm;
48 using namespace slpvectorizer;
49 
50 #define SV_NAME "slp-vectorizer"
51 #define DEBUG_TYPE "SLP"
52 
53 STATISTIC(NumVectorInstructions, "Number of vector instructions generated");
54 
55 static cl::opt<int>
56     SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
57                      cl::desc("Only vectorize if you gain more than this "
58                               "number "));
59 
60 static cl::opt<bool>
61 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
62                    cl::desc("Attempt to vectorize horizontal reductions"));
63 
64 static cl::opt<bool> ShouldStartVectorizeHorAtStore(
65     "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
66     cl::desc(
67         "Attempt to vectorize horizontal reductions feeding into a store"));
68 
69 static cl::opt<int>
70 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
71     cl::desc("Attempt to vectorize for this register size in bits"));
72 
73 /// Limits the size of scheduling regions in a block.
74 /// It avoid long compile times for _very_ large blocks where vector
75 /// instructions are spread over a wide range.
76 /// This limit is way higher than needed by real-world functions.
77 static cl::opt<int>
78 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
79     cl::desc("Limit the size of the SLP scheduling region per block"));
80 
81 static cl::opt<int> MinVectorRegSizeOption(
82     "slp-min-reg-size", cl::init(128), cl::Hidden,
83     cl::desc("Attempt to vectorize for this register size in bits"));
84 
85 static cl::opt<unsigned> RecursionMaxDepth(
86     "slp-recursion-max-depth", cl::init(12), cl::Hidden,
87     cl::desc("Limit the recursion depth when building a vectorizable tree"));
88 
89 static cl::opt<unsigned> MinTreeSize(
90     "slp-min-tree-size", cl::init(3), cl::Hidden,
91     cl::desc("Only vectorize small trees if they are fully vectorizable"));
92 
93 // Limit the number of alias checks. The limit is chosen so that
94 // it has no negative effect on the llvm benchmarks.
95 static const unsigned AliasedCheckLimit = 10;
96 
97 // Another limit for the alias checks: The maximum distance between load/store
98 // instructions where alias checks are done.
99 // This limit is useful for very large basic blocks.
100 static const unsigned MaxMemDepDistance = 160;
101 
102 /// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
103 /// regions to be handled.
104 static const int MinScheduleRegionSize = 16;
105 
106 /// \brief Predicate for the element types that the SLP vectorizer supports.
107 ///
108 /// The most important thing to filter here are types which are invalid in LLVM
109 /// vectors. We also filter target specific types which have absolutely no
110 /// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
111 /// avoids spending time checking the cost model and realizing that they will
112 /// be inevitably scalarized.
113 static bool isValidElementType(Type *Ty) {
114   return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
115          !Ty->isPPC_FP128Ty();
116 }
117 
118 /// \returns true if all of the instructions in \p VL are in the same block or
119 /// false otherwise.
120 static bool allSameBlock(ArrayRef<Value *> VL) {
121   Instruction *I0 = dyn_cast<Instruction>(VL[0]);
122   if (!I0)
123     return false;
124   BasicBlock *BB = I0->getParent();
125   for (int i = 1, e = VL.size(); i < e; i++) {
126     Instruction *I = dyn_cast<Instruction>(VL[i]);
127     if (!I)
128       return false;
129 
130     if (BB != I->getParent())
131       return false;
132   }
133   return true;
134 }
135 
136 /// \returns True if all of the values in \p VL are constants.
137 static bool allConstant(ArrayRef<Value *> VL) {
138   for (Value *i : VL)
139     if (!isa<Constant>(i))
140       return false;
141   return true;
142 }
143 
144 /// \returns True if all of the values in \p VL are identical.
145 static bool isSplat(ArrayRef<Value *> VL) {
146   for (unsigned i = 1, e = VL.size(); i < e; ++i)
147     if (VL[i] != VL[0])
148       return false;
149   return true;
150 }
151 
152 ///\returns Opcode that can be clubbed with \p Op to create an alternate
153 /// sequence which can later be merged as a ShuffleVector instruction.
154 static unsigned getAltOpcode(unsigned Op) {
155   switch (Op) {
156   case Instruction::FAdd:
157     return Instruction::FSub;
158   case Instruction::FSub:
159     return Instruction::FAdd;
160   case Instruction::Add:
161     return Instruction::Sub;
162   case Instruction::Sub:
163     return Instruction::Add;
164   default:
165     return 0;
166   }
167 }
168 
169 ///\returns bool representing if Opcode \p Op can be part
170 /// of an alternate sequence which can later be merged as
171 /// a ShuffleVector instruction.
172 static bool canCombineAsAltInst(unsigned Op) {
173   return Op == Instruction::FAdd || Op == Instruction::FSub ||
174          Op == Instruction::Sub || Op == Instruction::Add;
175 }
176 
177 /// \returns ShuffleVector instruction if instructions in \p VL have
178 ///  alternate fadd,fsub / fsub,fadd/add,sub/sub,add sequence.
179 /// (i.e. e.g. opcodes of fadd,fsub,fadd,fsub...)
180 static unsigned isAltInst(ArrayRef<Value *> VL) {
181   Instruction *I0 = dyn_cast<Instruction>(VL[0]);
182   unsigned Opcode = I0->getOpcode();
183   unsigned AltOpcode = getAltOpcode(Opcode);
184   for (int i = 1, e = VL.size(); i < e; i++) {
185     Instruction *I = dyn_cast<Instruction>(VL[i]);
186     if (!I || I->getOpcode() != ((i & 1) ? AltOpcode : Opcode))
187       return 0;
188   }
189   return Instruction::ShuffleVector;
190 }
191 
192 /// \returns The opcode if all of the Instructions in \p VL have the same
193 /// opcode, or zero.
194 static unsigned getSameOpcode(ArrayRef<Value *> VL) {
195   Instruction *I0 = dyn_cast<Instruction>(VL[0]);
196   if (!I0)
197     return 0;
198   unsigned Opcode = I0->getOpcode();
199   for (int i = 1, e = VL.size(); i < e; i++) {
200     Instruction *I = dyn_cast<Instruction>(VL[i]);
201     if (!I || Opcode != I->getOpcode()) {
202       if (canCombineAsAltInst(Opcode) && i == 1)
203         return isAltInst(VL);
204       return 0;
205     }
206   }
207   return Opcode;
208 }
209 
210 /// Get the intersection (logical and) of all of the potential IR flags
211 /// of each scalar operation (VL) that will be converted into a vector (I).
212 /// Flag set: NSW, NUW, exact, and all of fast-math.
213 static void propagateIRFlags(Value *I, ArrayRef<Value *> VL) {
214   if (auto *VecOp = dyn_cast<Instruction>(I)) {
215     if (auto *Intersection = dyn_cast<Instruction>(VL[0])) {
216       // Intersection is initialized to the 0th scalar,
217       // so start counting from index '1'.
218       for (int i = 1, e = VL.size(); i < e; ++i) {
219         if (auto *Scalar = dyn_cast<Instruction>(VL[i]))
220           Intersection->andIRFlags(Scalar);
221       }
222       VecOp->copyIRFlags(Intersection);
223     }
224   }
225 }
226 
227 /// \returns true if all of the values in \p VL have the same type or false
228 /// otherwise.
229 static bool allSameType(ArrayRef<Value *> VL) {
230   Type *Ty = VL[0]->getType();
231   for (int i = 1, e = VL.size(); i < e; i++)
232     if (VL[i]->getType() != Ty)
233       return false;
234 
235   return true;
236 }
237 
238 /// \returns True if Extract{Value,Element} instruction extracts element Idx.
239 static bool matchExtractIndex(Instruction *E, unsigned Idx, unsigned Opcode) {
240   assert(Opcode == Instruction::ExtractElement ||
241          Opcode == Instruction::ExtractValue);
242   if (Opcode == Instruction::ExtractElement) {
243     ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));
244     return CI && CI->getZExtValue() == Idx;
245   } else {
246     ExtractValueInst *EI = cast<ExtractValueInst>(E);
247     return EI->getNumIndices() == 1 && *EI->idx_begin() == Idx;
248   }
249 }
250 
251 /// \returns True if in-tree use also needs extract. This refers to
252 /// possible scalar operand in vectorized instruction.
253 static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
254                                     TargetLibraryInfo *TLI) {
255 
256   unsigned Opcode = UserInst->getOpcode();
257   switch (Opcode) {
258   case Instruction::Load: {
259     LoadInst *LI = cast<LoadInst>(UserInst);
260     return (LI->getPointerOperand() == Scalar);
261   }
262   case Instruction::Store: {
263     StoreInst *SI = cast<StoreInst>(UserInst);
264     return (SI->getPointerOperand() == Scalar);
265   }
266   case Instruction::Call: {
267     CallInst *CI = cast<CallInst>(UserInst);
268     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
269     if (hasVectorInstrinsicScalarOpd(ID, 1)) {
270       return (CI->getArgOperand(1) == Scalar);
271     }
272   }
273   default:
274     return false;
275   }
276 }
277 
278 /// \returns the AA location that is being access by the instruction.
279 static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
280   if (StoreInst *SI = dyn_cast<StoreInst>(I))
281     return MemoryLocation::get(SI);
282   if (LoadInst *LI = dyn_cast<LoadInst>(I))
283     return MemoryLocation::get(LI);
284   return MemoryLocation();
285 }
286 
287 /// \returns True if the instruction is not a volatile or atomic load/store.
288 static bool isSimple(Instruction *I) {
289   if (LoadInst *LI = dyn_cast<LoadInst>(I))
290     return LI->isSimple();
291   if (StoreInst *SI = dyn_cast<StoreInst>(I))
292     return SI->isSimple();
293   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
294     return !MI->isVolatile();
295   return true;
296 }
297 
298 namespace llvm {
299 namespace slpvectorizer {
300 /// Bottom Up SLP Vectorizer.
301 class BoUpSLP {
302 public:
303   typedef SmallVector<Value *, 8> ValueList;
304   typedef SmallVector<Instruction *, 16> InstrList;
305   typedef SmallPtrSet<Value *, 16> ValueSet;
306   typedef SmallVector<StoreInst *, 8> StoreList;
307 
308   BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
309           TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
310           DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
311           const DataLayout *DL)
312       : NumLoadsWantToKeepOrder(0), NumLoadsWantToChangeOrder(0), F(Func),
313         SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC), DB(DB),
314         DL(DL), Builder(Se->getContext()) {
315     CodeMetrics::collectEphemeralValues(F, AC, EphValues);
316     // Use the vector register size specified by the target unless overridden
317     // by a command-line option.
318     // TODO: It would be better to limit the vectorization factor based on
319     //       data type rather than just register size. For example, x86 AVX has
320     //       256-bit registers, but it does not support integer operations
321     //       at that width (that requires AVX2).
322     if (MaxVectorRegSizeOption.getNumOccurrences())
323       MaxVecRegSize = MaxVectorRegSizeOption;
324     else
325       MaxVecRegSize = TTI->getRegisterBitWidth(true);
326 
327     MinVecRegSize = MinVectorRegSizeOption;
328   }
329 
330   /// \brief Vectorize the tree that starts with the elements in \p VL.
331   /// Returns the vectorized root.
332   Value *vectorizeTree();
333 
334   /// \returns the cost incurred by unwanted spills and fills, caused by
335   /// holding live values over call sites.
336   int getSpillCost();
337 
338   /// \returns the vectorization cost of the subtree that starts at \p VL.
339   /// A negative number means that this is profitable.
340   int getTreeCost();
341 
342   /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
343   /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
344   void buildTree(ArrayRef<Value *> Roots,
345                  ArrayRef<Value *> UserIgnoreLst = None);
346 
347   /// Clear the internal data structures that are created by 'buildTree'.
348   void deleteTree() {
349     VectorizableTree.clear();
350     ScalarToTreeEntry.clear();
351     MustGather.clear();
352     ExternalUses.clear();
353     NumLoadsWantToKeepOrder = 0;
354     NumLoadsWantToChangeOrder = 0;
355     for (auto &Iter : BlocksSchedules) {
356       BlockScheduling *BS = Iter.second.get();
357       BS->clear();
358     }
359     MinBWs.clear();
360   }
361 
362   /// \brief Perform LICM and CSE on the newly generated gather sequences.
363   void optimizeGatherSequence();
364 
365   /// \returns true if it is beneficial to reverse the vector order.
366   bool shouldReorder() const {
367     return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
368   }
369 
370   /// \return The vector element size in bits to use when vectorizing the
371   /// expression tree ending at \p V. If V is a store, the size is the width of
372   /// the stored value. Otherwise, the size is the width of the largest loaded
373   /// value reaching V. This method is used by the vectorizer to calculate
374   /// vectorization factors.
375   unsigned getVectorElementSize(Value *V);
376 
377   /// Compute the minimum type sizes required to represent the entries in a
378   /// vectorizable tree.
379   void computeMinimumValueSizes();
380 
381   // \returns maximum vector register size as set by TTI or overridden by cl::opt.
382   unsigned getMaxVecRegSize() const {
383     return MaxVecRegSize;
384   }
385 
386   // \returns minimum vector register size as set by cl::opt.
387   unsigned getMinVecRegSize() const {
388     return MinVecRegSize;
389   }
390 
391   /// \brief Check if ArrayType or StructType is isomorphic to some VectorType.
392   ///
393   /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
394   unsigned canMapToVector(Type *T, const DataLayout &DL) const;
395 
396   /// \returns True if the VectorizableTree is both tiny and not fully
397   /// vectorizable. We do not vectorize such trees.
398   bool isTreeTinyAndNotFullyVectorizable();
399 
400 private:
401   struct TreeEntry;
402 
403   /// \returns the cost of the vectorizable entry.
404   int getEntryCost(TreeEntry *E);
405 
406   /// This is the recursive part of buildTree.
407   void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);
408 
409   /// \returns True if the ExtractElement/ExtractValue instructions in VL can
410   /// be vectorized to use the original vector (or aggregate "bitcast" to a vector).
411   bool canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const;
412 
413   /// Vectorize a single entry in the tree. VL icontains all isomorphic scalars
414   /// in order of its usage in a user program, for example ADD1, ADD2 and so on
415   /// or LOAD1 , LOAD2 etc.
416   Value *vectorizeTree(ArrayRef<Value *> VL, TreeEntry *E);
417 
418   /// Vectorize a single entry in the tree, starting in \p VL.
419   Value *vectorizeTree(ArrayRef<Value *> VL);
420 
421   /// \returns the pointer to the vectorized value if \p VL is already
422   /// vectorized, or NULL. They may happen in cycles.
423   Value *alreadyVectorized(ArrayRef<Value *> VL) const;
424 
425   /// \returns the scalarization cost for this type. Scalarization in this
426   /// context means the creation of vectors from a group of scalars.
427   int getGatherCost(Type *Ty);
428 
429   /// \returns the scalarization cost for this list of values. Assuming that
430   /// this subtree gets vectorized, we may need to extract the values from the
431   /// roots. This method calculates the cost of extracting the values.
432   int getGatherCost(ArrayRef<Value *> VL);
433 
434   /// \brief Set the Builder insert point to one after the last instruction in
435   /// the bundle
436   void setInsertPointAfterBundle(ArrayRef<Value *> VL);
437 
438   /// \returns a vector from a collection of scalars in \p VL.
439   Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);
440 
441   /// \returns whether the VectorizableTree is fully vectorizable and will
442   /// be beneficial even the tree height is tiny.
443   bool isFullyVectorizableTinyTree();
444 
445   /// \reorder commutative operands in alt shuffle if they result in
446   ///  vectorized code.
447   void reorderAltShuffleOperands(ArrayRef<Value *> VL,
448                                  SmallVectorImpl<Value *> &Left,
449                                  SmallVectorImpl<Value *> &Right);
450   /// \reorder commutative operands to get better probability of
451   /// generating vectorized code.
452   void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
453                                       SmallVectorImpl<Value *> &Left,
454                                       SmallVectorImpl<Value *> &Right);
455   struct TreeEntry {
456     TreeEntry() : Scalars(), VectorizedValue(nullptr),
457     NeedToGather(0), NeedToShuffle(0) {}
458 
459     /// \returns true if the scalars in VL are equal to this entry.
460     bool isSame(ArrayRef<Value *> VL) const {
461       assert(VL.size() == Scalars.size() && "Invalid size");
462       return std::equal(VL.begin(), VL.end(), Scalars.begin());
463     }
464 
465     /// \returns true if the scalars in VL are found in this tree entry.
466     bool isFoundJumbled(ArrayRef<Value *> VL, const DataLayout &DL,
467                         ScalarEvolution &SE) const {
468       assert(VL.size() == Scalars.size() && "Invalid size");
469       SmallVector<Value *, 8> List;
470       sortMemAccesses(VL, DL, SE, List);
471       return std::equal(List.begin(), List.end(), Scalars.begin());
472     }
473 
474     /// A vector of scalars.
475     ValueList Scalars;
476 
477     /// The Scalars are vectorized into this value. It is initialized to Null.
478     Value *VectorizedValue;
479 
480     /// Do we need to gather this sequence ?
481     bool NeedToGather;
482 
483     /// Do we need to shuffle the load ?
484     bool NeedToShuffle;
485   };
486 
487   /// Create a new VectorizableTree entry.
488   TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized,
489                           bool NeedToShuffle) {
490     VectorizableTree.emplace_back();
491     int idx = VectorizableTree.size() - 1;
492     TreeEntry *Last = &VectorizableTree[idx];
493     Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
494     Last->NeedToGather = !Vectorized;
495     Last->NeedToShuffle = NeedToShuffle;
496     if (Vectorized) {
497       for (int i = 0, e = VL.size(); i != e; ++i) {
498         assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
499         ScalarToTreeEntry[VL[i]] = idx;
500       }
501     } else {
502       MustGather.insert(VL.begin(), VL.end());
503     }
504     return Last;
505   }
506 
507   /// -- Vectorization State --
508   /// Holds all of the tree entries.
509   std::vector<TreeEntry> VectorizableTree;
510 
511   /// Maps a specific scalar to its tree entry.
512   SmallDenseMap<Value*, int> ScalarToTreeEntry;
513 
514   /// A list of scalars that we found that we need to keep as scalars.
515   ValueSet MustGather;
516 
517   /// This POD struct describes one external user in the vectorized tree.
518   struct ExternalUser {
519     ExternalUser (Value *S, llvm::User *U, int L) :
520       Scalar(S), User(U), Lane(L){}
521     // Which scalar in our function.
522     Value *Scalar;
523     // Which user that uses the scalar.
524     llvm::User *User;
525     // Which lane does the scalar belong to.
526     int Lane;
527   };
528   typedef SmallVector<ExternalUser, 16> UserList;
529 
530   /// Checks if two instructions may access the same memory.
531   ///
532   /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
533   /// is invariant in the calling loop.
534   bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
535                  Instruction *Inst2) {
536 
537     // First check if the result is already in the cache.
538     AliasCacheKey key = std::make_pair(Inst1, Inst2);
539     Optional<bool> &result = AliasCache[key];
540     if (result.hasValue()) {
541       return result.getValue();
542     }
543     MemoryLocation Loc2 = getLocation(Inst2, AA);
544     bool aliased = true;
545     if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
546       // Do the alias check.
547       aliased = AA->alias(Loc1, Loc2);
548     }
549     // Store the result in the cache.
550     result = aliased;
551     return aliased;
552   }
553 
554   typedef std::pair<Instruction *, Instruction *> AliasCacheKey;
555 
556   /// Cache for alias results.
557   /// TODO: consider moving this to the AliasAnalysis itself.
558   DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
559 
560   /// Removes an instruction from its block and eventually deletes it.
561   /// It's like Instruction::eraseFromParent() except that the actual deletion
562   /// is delayed until BoUpSLP is destructed.
563   /// This is required to ensure that there are no incorrect collisions in the
564   /// AliasCache, which can happen if a new instruction is allocated at the
565   /// same address as a previously deleted instruction.
566   void eraseInstruction(Instruction *I) {
567     I->removeFromParent();
568     I->dropAllReferences();
569     DeletedInstructions.push_back(std::unique_ptr<Instruction>(I));
570   }
571 
572   /// Temporary store for deleted instructions. Instructions will be deleted
573   /// eventually when the BoUpSLP is destructed.
574   SmallVector<std::unique_ptr<Instruction>, 8> DeletedInstructions;
575 
576   /// A list of values that need to extracted out of the tree.
577   /// This list holds pairs of (Internal Scalar : External User).
578   UserList ExternalUses;
579 
580   /// Values used only by @llvm.assume calls.
581   SmallPtrSet<const Value *, 32> EphValues;
582 
583   /// Holds all of the instructions that we gathered.
584   SetVector<Instruction *> GatherSeq;
585   /// A list of blocks that we are going to CSE.
586   SetVector<BasicBlock *> CSEBlocks;
587 
588   /// Contains all scheduling relevant data for an instruction.
589   /// A ScheduleData either represents a single instruction or a member of an
590   /// instruction bundle (= a group of instructions which is combined into a
591   /// vector instruction).
592   struct ScheduleData {
593 
594     // The initial value for the dependency counters. It means that the
595     // dependencies are not calculated yet.
596     enum { InvalidDeps = -1 };
597 
598     ScheduleData()
599         : Inst(nullptr), FirstInBundle(nullptr), NextInBundle(nullptr),
600           NextLoadStore(nullptr), SchedulingRegionID(0), SchedulingPriority(0),
601           Dependencies(InvalidDeps), UnscheduledDeps(InvalidDeps),
602           UnscheduledDepsInBundle(InvalidDeps), IsScheduled(false) {}
603 
604     void init(int BlockSchedulingRegionID) {
605       FirstInBundle = this;
606       NextInBundle = nullptr;
607       NextLoadStore = nullptr;
608       IsScheduled = false;
609       SchedulingRegionID = BlockSchedulingRegionID;
610       UnscheduledDepsInBundle = UnscheduledDeps;
611       clearDependencies();
612     }
613 
614     /// Returns true if the dependency information has been calculated.
615     bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
616 
617     /// Returns true for single instructions and for bundle representatives
618     /// (= the head of a bundle).
619     bool isSchedulingEntity() const { return FirstInBundle == this; }
620 
621     /// Returns true if it represents an instruction bundle and not only a
622     /// single instruction.
623     bool isPartOfBundle() const {
624       return NextInBundle != nullptr || FirstInBundle != this;
625     }
626 
627     /// Returns true if it is ready for scheduling, i.e. it has no more
628     /// unscheduled depending instructions/bundles.
629     bool isReady() const {
630       assert(isSchedulingEntity() &&
631              "can't consider non-scheduling entity for ready list");
632       return UnscheduledDepsInBundle == 0 && !IsScheduled;
633     }
634 
635     /// Modifies the number of unscheduled dependencies, also updating it for
636     /// the whole bundle.
637     int incrementUnscheduledDeps(int Incr) {
638       UnscheduledDeps += Incr;
639       return FirstInBundle->UnscheduledDepsInBundle += Incr;
640     }
641 
642     /// Sets the number of unscheduled dependencies to the number of
643     /// dependencies.
644     void resetUnscheduledDeps() {
645       incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
646     }
647 
648     /// Clears all dependency information.
649     void clearDependencies() {
650       Dependencies = InvalidDeps;
651       resetUnscheduledDeps();
652       MemoryDependencies.clear();
653     }
654 
655     void dump(raw_ostream &os) const {
656       if (!isSchedulingEntity()) {
657         os << "/ " << *Inst;
658       } else if (NextInBundle) {
659         os << '[' << *Inst;
660         ScheduleData *SD = NextInBundle;
661         while (SD) {
662           os << ';' << *SD->Inst;
663           SD = SD->NextInBundle;
664         }
665         os << ']';
666       } else {
667         os << *Inst;
668       }
669     }
670 
671     Instruction *Inst;
672 
673     /// Points to the head in an instruction bundle (and always to this for
674     /// single instructions).
675     ScheduleData *FirstInBundle;
676 
677     /// Single linked list of all instructions in a bundle. Null if it is a
678     /// single instruction.
679     ScheduleData *NextInBundle;
680 
681     /// Single linked list of all memory instructions (e.g. load, store, call)
682     /// in the block - until the end of the scheduling region.
683     ScheduleData *NextLoadStore;
684 
685     /// The dependent memory instructions.
686     /// This list is derived on demand in calculateDependencies().
687     SmallVector<ScheduleData *, 4> MemoryDependencies;
688 
689     /// This ScheduleData is in the current scheduling region if this matches
690     /// the current SchedulingRegionID of BlockScheduling.
691     int SchedulingRegionID;
692 
693     /// Used for getting a "good" final ordering of instructions.
694     int SchedulingPriority;
695 
696     /// The number of dependencies. Constitutes of the number of users of the
697     /// instruction plus the number of dependent memory instructions (if any).
698     /// This value is calculated on demand.
699     /// If InvalidDeps, the number of dependencies is not calculated yet.
700     ///
701     int Dependencies;
702 
703     /// The number of dependencies minus the number of dependencies of scheduled
704     /// instructions. As soon as this is zero, the instruction/bundle gets ready
705     /// for scheduling.
706     /// Note that this is negative as long as Dependencies is not calculated.
707     int UnscheduledDeps;
708 
709     /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for
710     /// single instructions.
711     int UnscheduledDepsInBundle;
712 
713     /// True if this instruction is scheduled (or considered as scheduled in the
714     /// dry-run).
715     bool IsScheduled;
716   };
717 
718 #ifndef NDEBUG
719   friend inline raw_ostream &operator<<(raw_ostream &os,
720                                         const BoUpSLP::ScheduleData &SD) {
721     SD.dump(os);
722     return os;
723   }
724 #endif
725 
726   /// Contains all scheduling data for a basic block.
727   ///
728   struct BlockScheduling {
729 
730     BlockScheduling(BasicBlock *BB)
731         : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize),
732           ScheduleStart(nullptr), ScheduleEnd(nullptr),
733           FirstLoadStoreInRegion(nullptr), LastLoadStoreInRegion(nullptr),
734           ScheduleRegionSize(0),
735           ScheduleRegionSizeLimit(ScheduleRegionSizeBudget),
736           // Make sure that the initial SchedulingRegionID is greater than the
737           // initial SchedulingRegionID in ScheduleData (which is 0).
738           SchedulingRegionID(1) {}
739 
740     void clear() {
741       ReadyInsts.clear();
742       ScheduleStart = nullptr;
743       ScheduleEnd = nullptr;
744       FirstLoadStoreInRegion = nullptr;
745       LastLoadStoreInRegion = nullptr;
746 
747       // Reduce the maximum schedule region size by the size of the
748       // previous scheduling run.
749       ScheduleRegionSizeLimit -= ScheduleRegionSize;
750       if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
751         ScheduleRegionSizeLimit = MinScheduleRegionSize;
752       ScheduleRegionSize = 0;
753 
754       // Make a new scheduling region, i.e. all existing ScheduleData is not
755       // in the new region yet.
756       ++SchedulingRegionID;
757     }
758 
759     ScheduleData *getScheduleData(Value *V) {
760       ScheduleData *SD = ScheduleDataMap[V];
761       if (SD && SD->SchedulingRegionID == SchedulingRegionID)
762         return SD;
763       return nullptr;
764     }
765 
766     bool isInSchedulingRegion(ScheduleData *SD) {
767       return SD->SchedulingRegionID == SchedulingRegionID;
768     }
769 
770     /// Marks an instruction as scheduled and puts all dependent ready
771     /// instructions into the ready-list.
772     template <typename ReadyListType>
773     void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
774       SD->IsScheduled = true;
775       DEBUG(dbgs() << "SLP:   schedule " << *SD << "\n");
776 
777       ScheduleData *BundleMember = SD;
778       while (BundleMember) {
779         // Handle the def-use chain dependencies.
780         for (Use &U : BundleMember->Inst->operands()) {
781           ScheduleData *OpDef = getScheduleData(U.get());
782           if (OpDef && OpDef->hasValidDependencies() &&
783               OpDef->incrementUnscheduledDeps(-1) == 0) {
784             // There are no more unscheduled dependencies after decrementing,
785             // so we can put the dependent instruction into the ready list.
786             ScheduleData *DepBundle = OpDef->FirstInBundle;
787             assert(!DepBundle->IsScheduled &&
788                    "already scheduled bundle gets ready");
789             ReadyList.insert(DepBundle);
790             DEBUG(dbgs() << "SLP:    gets ready (def): " << *DepBundle << "\n");
791           }
792         }
793         // Handle the memory dependencies.
794         for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
795           if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
796             // There are no more unscheduled dependencies after decrementing,
797             // so we can put the dependent instruction into the ready list.
798             ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
799             assert(!DepBundle->IsScheduled &&
800                    "already scheduled bundle gets ready");
801             ReadyList.insert(DepBundle);
802             DEBUG(dbgs() << "SLP:    gets ready (mem): " << *DepBundle << "\n");
803           }
804         }
805         BundleMember = BundleMember->NextInBundle;
806       }
807     }
808 
809     /// Put all instructions into the ReadyList which are ready for scheduling.
810     template <typename ReadyListType>
811     void initialFillReadyList(ReadyListType &ReadyList) {
812       for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
813         ScheduleData *SD = getScheduleData(I);
814         if (SD->isSchedulingEntity() && SD->isReady()) {
815           ReadyList.insert(SD);
816           DEBUG(dbgs() << "SLP:    initially in ready list: " << *I << "\n");
817         }
818       }
819     }
820 
821     /// Checks if a bundle of instructions can be scheduled, i.e. has no
822     /// cyclic dependencies. This is only a dry-run, no instructions are
823     /// actually moved at this stage.
824     bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP);
825 
826     /// Un-bundles a group of instructions.
827     void cancelScheduling(ArrayRef<Value *> VL);
828 
829     /// Extends the scheduling region so that V is inside the region.
830     /// \returns true if the region size is within the limit.
831     bool extendSchedulingRegion(Value *V);
832 
833     /// Initialize the ScheduleData structures for new instructions in the
834     /// scheduling region.
835     void initScheduleData(Instruction *FromI, Instruction *ToI,
836                           ScheduleData *PrevLoadStore,
837                           ScheduleData *NextLoadStore);
838 
839     /// Updates the dependency information of a bundle and of all instructions/
840     /// bundles which depend on the original bundle.
841     void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
842                                BoUpSLP *SLP);
843 
844     /// Sets all instruction in the scheduling region to un-scheduled.
845     void resetSchedule();
846 
847     BasicBlock *BB;
848 
849     /// Simple memory allocation for ScheduleData.
850     std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
851 
852     /// The size of a ScheduleData array in ScheduleDataChunks.
853     int ChunkSize;
854 
855     /// The allocator position in the current chunk, which is the last entry
856     /// of ScheduleDataChunks.
857     int ChunkPos;
858 
859     /// Attaches ScheduleData to Instruction.
860     /// Note that the mapping survives during all vectorization iterations, i.e.
861     /// ScheduleData structures are recycled.
862     DenseMap<Value *, ScheduleData *> ScheduleDataMap;
863 
864     struct ReadyList : SmallVector<ScheduleData *, 8> {
865       void insert(ScheduleData *SD) { push_back(SD); }
866     };
867 
868     /// The ready-list for scheduling (only used for the dry-run).
869     ReadyList ReadyInsts;
870 
871     /// The first instruction of the scheduling region.
872     Instruction *ScheduleStart;
873 
874     /// The first instruction _after_ the scheduling region.
875     Instruction *ScheduleEnd;
876 
877     /// The first memory accessing instruction in the scheduling region
878     /// (can be null).
879     ScheduleData *FirstLoadStoreInRegion;
880 
881     /// The last memory accessing instruction in the scheduling region
882     /// (can be null).
883     ScheduleData *LastLoadStoreInRegion;
884 
885     /// The current size of the scheduling region.
886     int ScheduleRegionSize;
887 
888     /// The maximum size allowed for the scheduling region.
889     int ScheduleRegionSizeLimit;
890 
891     /// The ID of the scheduling region. For a new vectorization iteration this
892     /// is incremented which "removes" all ScheduleData from the region.
893     int SchedulingRegionID;
894   };
895 
896   /// Attaches the BlockScheduling structures to basic blocks.
897   MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
898 
899   /// Performs the "real" scheduling. Done before vectorization is actually
900   /// performed in a basic block.
901   void scheduleBlock(BlockScheduling *BS);
902 
903   /// List of users to ignore during scheduling and that don't need extracting.
904   ArrayRef<Value *> UserIgnoreList;
905 
906   // Number of load bundles that contain consecutive loads.
907   int NumLoadsWantToKeepOrder;
908 
909   // Number of load bundles that contain consecutive loads in reversed order.
910   int NumLoadsWantToChangeOrder;
911 
912   // Analysis and block reference.
913   Function *F;
914   ScalarEvolution *SE;
915   TargetTransformInfo *TTI;
916   TargetLibraryInfo *TLI;
917   AliasAnalysis *AA;
918   LoopInfo *LI;
919   DominatorTree *DT;
920   AssumptionCache *AC;
921   DemandedBits *DB;
922   const DataLayout *DL;
923   unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
924   unsigned MinVecRegSize; // Set by cl::opt (default: 128).
925   /// Instruction builder to construct the vectorized tree.
926   IRBuilder<> Builder;
927 
928   /// A map of scalar integer values to the smallest bit width with which they
929   /// can legally be represented. The values map to (width, signed) pairs,
930   /// where "width" indicates the minimum bit width and "signed" is True if the
931   /// value must be signed-extended, rather than zero-extended, back to its
932   /// original width.
933   MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
934 };
935 
936 } // end namespace llvm
937 } // end namespace slpvectorizer
938 
939 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
940                         ArrayRef<Value *> UserIgnoreLst) {
941   deleteTree();
942   UserIgnoreList = UserIgnoreLst;
943   if (!allSameType(Roots))
944     return;
945   buildTree_rec(Roots, 0);
946 
947   // Collect the values that we need to extract from the tree.
948   for (TreeEntry &EIdx : VectorizableTree) {
949     TreeEntry *Entry = &EIdx;
950 
951     // For each lane:
952     for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
953       Value *Scalar = Entry->Scalars[Lane];
954 
955       // No need to handle users of gathered values.
956       if (Entry->NeedToGather)
957         continue;
958 
959       for (User *U : Scalar->users()) {
960         DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
961 
962         Instruction *UserInst = dyn_cast<Instruction>(U);
963         if (!UserInst)
964           continue;
965 
966         // Skip in-tree scalars that become vectors
967         if (ScalarToTreeEntry.count(U)) {
968           int Idx = ScalarToTreeEntry[U];
969           TreeEntry *UseEntry = &VectorizableTree[Idx];
970           Value *UseScalar = UseEntry->Scalars[0];
971           // Some in-tree scalars will remain as scalar in vectorized
972           // instructions. If that is the case, the one in Lane 0 will
973           // be used.
974           if (UseScalar != U ||
975               !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
976             DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
977                          << ".\n");
978             assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
979             continue;
980           }
981         }
982 
983         // Ignore users in the user ignore list.
984         if (is_contained(UserIgnoreList, UserInst))
985           continue;
986 
987         DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
988               Lane << " from " << *Scalar << ".\n");
989         ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
990       }
991     }
992   }
993 }
994 
995 
996 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
997   bool isAltShuffle = false;
998   assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
999 
1000   if (Depth == RecursionMaxDepth) {
1001     DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
1002     newTreeEntry(VL, false, false);
1003     return;
1004   }
1005 
1006   // Don't handle vectors.
1007   if (VL[0]->getType()->isVectorTy()) {
1008     DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
1009     newTreeEntry(VL, false, false);
1010     return;
1011   }
1012 
1013   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
1014     if (SI->getValueOperand()->getType()->isVectorTy()) {
1015       DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
1016       newTreeEntry(VL, false, false);
1017       return;
1018     }
1019   unsigned Opcode = getSameOpcode(VL);
1020 
1021   // Check that this shuffle vector refers to the alternate
1022   // sequence of opcodes.
1023   if (Opcode == Instruction::ShuffleVector) {
1024     Instruction *I0 = dyn_cast<Instruction>(VL[0]);
1025     unsigned Op = I0->getOpcode();
1026     if (Op != Instruction::ShuffleVector)
1027       isAltShuffle = true;
1028   }
1029 
1030   // If all of the operands are identical or constant we have a simple solution.
1031   if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !Opcode) {
1032     DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
1033     newTreeEntry(VL, false, false);
1034     return;
1035   }
1036 
1037   // We now know that this is a vector of instructions of the same type from
1038   // the same block.
1039 
1040   // Don't vectorize ephemeral values.
1041   for (unsigned i = 0, e = VL.size(); i != e; ++i) {
1042     if (EphValues.count(VL[i])) {
1043       DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
1044             ") is ephemeral.\n");
1045       newTreeEntry(VL, false, false);
1046       return;
1047     }
1048   }
1049 
1050   // Check if this is a duplicate of another entry.
1051   if (ScalarToTreeEntry.count(VL[0])) {
1052     int Idx = ScalarToTreeEntry[VL[0]];
1053     TreeEntry *E = &VectorizableTree[Idx];
1054     for (unsigned i = 0, e = VL.size(); i != e; ++i) {
1055       DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
1056       if (E->Scalars[i] != VL[i]) {
1057         DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
1058         newTreeEntry(VL, false, false);
1059         return;
1060       }
1061     }
1062     DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
1063     return;
1064   }
1065 
1066   // Check that none of the instructions in the bundle are already in the tree.
1067   for (unsigned i = 0, e = VL.size(); i != e; ++i) {
1068     if (ScalarToTreeEntry.count(VL[i])) {
1069       DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
1070             ") is already in tree.\n");
1071       newTreeEntry(VL, false, false);
1072       return;
1073     }
1074   }
1075 
1076   // If any of the scalars is marked as a value that needs to stay scalar then
1077   // we need to gather the scalars.
1078   for (unsigned i = 0, e = VL.size(); i != e; ++i) {
1079     if (MustGather.count(VL[i])) {
1080       DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
1081       newTreeEntry(VL, false, false);
1082       return;
1083     }
1084   }
1085 
1086   // Check that all of the users of the scalars that we want to vectorize are
1087   // schedulable.
1088   Instruction *VL0 = cast<Instruction>(VL[0]);
1089   BasicBlock *BB = cast<Instruction>(VL0)->getParent();
1090 
1091   if (!DT->isReachableFromEntry(BB)) {
1092     // Don't go into unreachable blocks. They may contain instructions with
1093     // dependency cycles which confuse the final scheduling.
1094     DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
1095     newTreeEntry(VL, false, false);
1096     return;
1097   }
1098 
1099   // Check that every instructions appears once in this bundle.
1100   for (unsigned i = 0, e = VL.size(); i < e; ++i)
1101     for (unsigned j = i+1; j < e; ++j)
1102       if (VL[i] == VL[j]) {
1103         DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
1104         newTreeEntry(VL, false, false);
1105         return;
1106       }
1107 
1108   auto &BSRef = BlocksSchedules[BB];
1109   if (!BSRef) {
1110     BSRef = llvm::make_unique<BlockScheduling>(BB);
1111   }
1112   BlockScheduling &BS = *BSRef.get();
1113 
1114   if (!BS.tryScheduleBundle(VL, this)) {
1115     DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
1116     assert((!BS.getScheduleData(VL[0]) ||
1117             !BS.getScheduleData(VL[0])->isPartOfBundle()) &&
1118            "tryScheduleBundle should cancelScheduling on failure");
1119     newTreeEntry(VL, false, false);
1120     return;
1121   }
1122   DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
1123 
1124   switch (Opcode) {
1125     case Instruction::PHI: {
1126       PHINode *PH = dyn_cast<PHINode>(VL0);
1127 
1128       // Check for terminator values (e.g. invoke).
1129       for (unsigned j = 0; j < VL.size(); ++j)
1130         for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
1131           TerminatorInst *Term = dyn_cast<TerminatorInst>(
1132               cast<PHINode>(VL[j])->getIncomingValueForBlock(PH->getIncomingBlock(i)));
1133           if (Term) {
1134             DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
1135             BS.cancelScheduling(VL);
1136             newTreeEntry(VL, false, false);
1137             return;
1138           }
1139         }
1140 
1141       newTreeEntry(VL, true, false);
1142       DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
1143 
1144       for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
1145         ValueList Operands;
1146         // Prepare the operand vector.
1147         for (Value *j : VL)
1148           Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock(
1149               PH->getIncomingBlock(i)));
1150 
1151         buildTree_rec(Operands, Depth + 1);
1152       }
1153       return;
1154     }
1155     case Instruction::ExtractValue:
1156     case Instruction::ExtractElement: {
1157       bool Reuse = canReuseExtract(VL, Opcode);
1158       if (Reuse) {
1159         DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
1160       } else {
1161         BS.cancelScheduling(VL);
1162       }
1163       newTreeEntry(VL, Reuse, false);
1164       return;
1165     }
1166     case Instruction::Load: {
1167       // Check that a vectorized load would load the same memory as a scalar
1168       // load.
1169       // For example we don't want vectorize loads that are smaller than 8 bit.
1170       // Even though we have a packed struct {<i2, i2, i2, i2>} LLVM treats
1171       // loading/storing it as an i8 struct. If we vectorize loads/stores from
1172       // such a struct we read/write packed bits disagreeing with the
1173       // unvectorized version.
1174       Type *ScalarTy = VL[0]->getType();
1175 
1176       if (DL->getTypeSizeInBits(ScalarTy) !=
1177           DL->getTypeAllocSizeInBits(ScalarTy)) {
1178         BS.cancelScheduling(VL);
1179         newTreeEntry(VL, false, false);
1180         DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
1181         return;
1182       }
1183 
1184       // Make sure all loads in the bundle are simple - we can't vectorize
1185       // atomic or volatile loads.
1186       for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
1187         LoadInst *L = cast<LoadInst>(VL[i]);
1188         if (!L->isSimple()) {
1189           BS.cancelScheduling(VL);
1190           newTreeEntry(VL, false, false);
1191           DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
1192           return;
1193         }
1194       }
1195 
1196       // Check if the loads are consecutive, reversed, or neither.
1197       bool Consecutive = true;
1198       bool ReverseConsecutive = true;
1199       for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
1200         if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
1201           Consecutive = false;
1202           break;
1203         } else {
1204           ReverseConsecutive = false;
1205         }
1206       }
1207 
1208       if (Consecutive) {
1209         ++NumLoadsWantToKeepOrder;
1210         newTreeEntry(VL, true, false);
1211         DEBUG(dbgs() << "SLP: added a vector of loads.\n");
1212         return;
1213       }
1214 
1215       // If none of the load pairs were consecutive when checked in order,
1216       // check the reverse order.
1217       if (ReverseConsecutive)
1218         for (unsigned i = VL.size() - 1; i > 0; --i)
1219           if (!isConsecutiveAccess(VL[i], VL[i - 1], *DL, *SE)) {
1220             ReverseConsecutive = false;
1221             break;
1222           }
1223 
1224       if (VL.size() > 2 && !ReverseConsecutive) {
1225         bool ShuffledLoads = true;
1226         SmallVector<Value *, 8> List;
1227         sortMemAccesses(VL, *DL, *SE, List);
1228         auto NewVL = makeArrayRef(List.begin(), List.end());
1229         for (unsigned i = 0, e = NewVL.size() - 1; i < e; ++i) {
1230           if (!isConsecutiveAccess(NewVL[i], NewVL[i + 1], *DL, *SE)) {
1231             ShuffledLoads = false;
1232             break;
1233           }
1234         }
1235         if (ShuffledLoads) {
1236           newTreeEntry(NewVL, true, true);
1237           return;
1238         }
1239       }
1240 
1241       BS.cancelScheduling(VL);
1242       newTreeEntry(VL, false, false);
1243 
1244       if (ReverseConsecutive) {
1245         ++NumLoadsWantToChangeOrder;
1246         DEBUG(dbgs() << "SLP: Gathering reversed loads.\n");
1247       } else {
1248         DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
1249       }
1250       return;
1251     }
1252     case Instruction::ZExt:
1253     case Instruction::SExt:
1254     case Instruction::FPToUI:
1255     case Instruction::FPToSI:
1256     case Instruction::FPExt:
1257     case Instruction::PtrToInt:
1258     case Instruction::IntToPtr:
1259     case Instruction::SIToFP:
1260     case Instruction::UIToFP:
1261     case Instruction::Trunc:
1262     case Instruction::FPTrunc:
1263     case Instruction::BitCast: {
1264       Type *SrcTy = VL0->getOperand(0)->getType();
1265       for (Value *Val : VL) {
1266         Type *Ty = cast<Instruction>(Val)->getOperand(0)->getType();
1267         if (Ty != SrcTy || !isValidElementType(Ty)) {
1268           BS.cancelScheduling(VL);
1269           newTreeEntry(VL, false, false);
1270           DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
1271           return;
1272         }
1273       }
1274       newTreeEntry(VL, true, false);
1275       DEBUG(dbgs() << "SLP: added a vector of casts.\n");
1276 
1277       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1278         ValueList Operands;
1279         // Prepare the operand vector.
1280         for (Value *j : VL)
1281           Operands.push_back(cast<Instruction>(j)->getOperand(i));
1282 
1283         buildTree_rec(Operands, Depth+1);
1284       }
1285       return;
1286     }
1287     case Instruction::ICmp:
1288     case Instruction::FCmp: {
1289       // Check that all of the compares have the same predicate.
1290       CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
1291       Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
1292       for (unsigned i = 1, e = VL.size(); i < e; ++i) {
1293         CmpInst *Cmp = cast<CmpInst>(VL[i]);
1294         if (Cmp->getPredicate() != P0 ||
1295             Cmp->getOperand(0)->getType() != ComparedTy) {
1296           BS.cancelScheduling(VL);
1297           newTreeEntry(VL, false, false);
1298           DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
1299           return;
1300         }
1301       }
1302 
1303       newTreeEntry(VL, true, false);
1304       DEBUG(dbgs() << "SLP: added a vector of compares.\n");
1305 
1306       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1307         ValueList Operands;
1308         // Prepare the operand vector.
1309         for (Value *j : VL)
1310           Operands.push_back(cast<Instruction>(j)->getOperand(i));
1311 
1312         buildTree_rec(Operands, Depth+1);
1313       }
1314       return;
1315     }
1316     case Instruction::Select:
1317     case Instruction::Add:
1318     case Instruction::FAdd:
1319     case Instruction::Sub:
1320     case Instruction::FSub:
1321     case Instruction::Mul:
1322     case Instruction::FMul:
1323     case Instruction::UDiv:
1324     case Instruction::SDiv:
1325     case Instruction::FDiv:
1326     case Instruction::URem:
1327     case Instruction::SRem:
1328     case Instruction::FRem:
1329     case Instruction::Shl:
1330     case Instruction::LShr:
1331     case Instruction::AShr:
1332     case Instruction::And:
1333     case Instruction::Or:
1334     case Instruction::Xor: {
1335       newTreeEntry(VL, true, false);
1336       DEBUG(dbgs() << "SLP: added a vector of bin op.\n");
1337 
1338       // Sort operands of the instructions so that each side is more likely to
1339       // have the same opcode.
1340       if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
1341         ValueList Left, Right;
1342         reorderInputsAccordingToOpcode(VL, Left, Right);
1343         buildTree_rec(Left, Depth + 1);
1344         buildTree_rec(Right, Depth + 1);
1345         return;
1346       }
1347 
1348       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1349         ValueList Operands;
1350         // Prepare the operand vector.
1351         for (Value *j : VL)
1352           Operands.push_back(cast<Instruction>(j)->getOperand(i));
1353 
1354         buildTree_rec(Operands, Depth+1);
1355       }
1356       return;
1357     }
1358     case Instruction::GetElementPtr: {
1359       // We don't combine GEPs with complicated (nested) indexing.
1360       for (Value *Val : VL) {
1361         if (cast<Instruction>(Val)->getNumOperands() != 2) {
1362           DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
1363           BS.cancelScheduling(VL);
1364           newTreeEntry(VL, false, false);
1365           return;
1366         }
1367       }
1368 
1369       // We can't combine several GEPs into one vector if they operate on
1370       // different types.
1371       Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
1372       for (Value *Val : VL) {
1373         Type *CurTy = cast<Instruction>(Val)->getOperand(0)->getType();
1374         if (Ty0 != CurTy) {
1375           DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
1376           BS.cancelScheduling(VL);
1377           newTreeEntry(VL, false, false);
1378           return;
1379         }
1380       }
1381 
1382       // We don't combine GEPs with non-constant indexes.
1383       for (Value *Val : VL) {
1384         auto Op = cast<Instruction>(Val)->getOperand(1);
1385         if (!isa<ConstantInt>(Op)) {
1386           DEBUG(
1387               dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
1388           BS.cancelScheduling(VL);
1389           newTreeEntry(VL, false, false);
1390           return;
1391         }
1392       }
1393 
1394       newTreeEntry(VL, true, false);
1395       DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
1396       for (unsigned i = 0, e = 2; i < e; ++i) {
1397         ValueList Operands;
1398         // Prepare the operand vector.
1399         for (Value *j : VL)
1400           Operands.push_back(cast<Instruction>(j)->getOperand(i));
1401 
1402         buildTree_rec(Operands, Depth + 1);
1403       }
1404       return;
1405     }
1406     case Instruction::Store: {
1407       // Check if the stores are consecutive or of we need to swizzle them.
1408       for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
1409         if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
1410           BS.cancelScheduling(VL);
1411           newTreeEntry(VL, false, false);
1412           DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
1413           return;
1414         }
1415 
1416       newTreeEntry(VL, true, false);
1417       DEBUG(dbgs() << "SLP: added a vector of stores.\n");
1418 
1419       ValueList Operands;
1420       for (Value *j : VL)
1421         Operands.push_back(cast<Instruction>(j)->getOperand(0));
1422 
1423       buildTree_rec(Operands, Depth + 1);
1424       return;
1425     }
1426     case Instruction::Call: {
1427       // Check if the calls are all to the same vectorizable intrinsic.
1428       CallInst *CI = cast<CallInst>(VL[0]);
1429       // Check if this is an Intrinsic call or something that can be
1430       // represented by an intrinsic call
1431       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
1432       if (!isTriviallyVectorizable(ID)) {
1433         BS.cancelScheduling(VL);
1434         newTreeEntry(VL, false, false);
1435         DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
1436         return;
1437       }
1438       Function *Int = CI->getCalledFunction();
1439       Value *A1I = nullptr;
1440       if (hasVectorInstrinsicScalarOpd(ID, 1))
1441         A1I = CI->getArgOperand(1);
1442       for (unsigned i = 1, e = VL.size(); i != e; ++i) {
1443         CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
1444         if (!CI2 || CI2->getCalledFunction() != Int ||
1445             getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
1446             !CI->hasIdenticalOperandBundleSchema(*CI2)) {
1447           BS.cancelScheduling(VL);
1448           newTreeEntry(VL, false, false);
1449           DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
1450                        << "\n");
1451           return;
1452         }
1453         // ctlz,cttz and powi are special intrinsics whose second argument
1454         // should be same in order for them to be vectorized.
1455         if (hasVectorInstrinsicScalarOpd(ID, 1)) {
1456           Value *A1J = CI2->getArgOperand(1);
1457           if (A1I != A1J) {
1458             BS.cancelScheduling(VL);
1459             newTreeEntry(VL, false, false);
1460             DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
1461                          << " argument "<< A1I<<"!=" << A1J
1462                          << "\n");
1463             return;
1464           }
1465         }
1466         // Verify that the bundle operands are identical between the two calls.
1467         if (CI->hasOperandBundles() &&
1468             !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
1469                         CI->op_begin() + CI->getBundleOperandsEndIndex(),
1470                         CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
1471           BS.cancelScheduling(VL);
1472           newTreeEntry(VL, false, false);
1473           DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI << "!="
1474                        << *VL[i] << '\n');
1475           return;
1476         }
1477       }
1478 
1479       newTreeEntry(VL, true, false);
1480       for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
1481         ValueList Operands;
1482         // Prepare the operand vector.
1483         for (Value *j : VL) {
1484           CallInst *CI2 = dyn_cast<CallInst>(j);
1485           Operands.push_back(CI2->getArgOperand(i));
1486         }
1487         buildTree_rec(Operands, Depth + 1);
1488       }
1489       return;
1490     }
1491     case Instruction::ShuffleVector: {
1492       // If this is not an alternate sequence of opcode like add-sub
1493       // then do not vectorize this instruction.
1494       if (!isAltShuffle) {
1495         BS.cancelScheduling(VL);
1496         newTreeEntry(VL, false, false);
1497         DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
1498         return;
1499       }
1500       newTreeEntry(VL, true, false);
1501       DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
1502 
1503       // Reorder operands if reordering would enable vectorization.
1504       if (isa<BinaryOperator>(VL0)) {
1505         ValueList Left, Right;
1506         reorderAltShuffleOperands(VL, Left, Right);
1507         buildTree_rec(Left, Depth + 1);
1508         buildTree_rec(Right, Depth + 1);
1509         return;
1510       }
1511 
1512       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1513         ValueList Operands;
1514         // Prepare the operand vector.
1515         for (Value *j : VL)
1516           Operands.push_back(cast<Instruction>(j)->getOperand(i));
1517 
1518         buildTree_rec(Operands, Depth + 1);
1519       }
1520       return;
1521     }
1522     default:
1523       BS.cancelScheduling(VL);
1524       newTreeEntry(VL, false, false);
1525       DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
1526       return;
1527   }
1528 }
1529 
1530 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
1531   unsigned N;
1532   Type *EltTy;
1533   auto *ST = dyn_cast<StructType>(T);
1534   if (ST) {
1535     N = ST->getNumElements();
1536     EltTy = *ST->element_begin();
1537   } else {
1538     N = cast<ArrayType>(T)->getNumElements();
1539     EltTy = cast<ArrayType>(T)->getElementType();
1540   }
1541   if (!isValidElementType(EltTy))
1542     return 0;
1543   uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N));
1544   if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T))
1545     return 0;
1546   if (ST) {
1547     // Check that struct is homogeneous.
1548     for (const auto *Ty : ST->elements())
1549       if (Ty != EltTy)
1550         return 0;
1551   }
1552   return N;
1553 }
1554 
1555 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const {
1556   assert(Opcode == Instruction::ExtractElement ||
1557          Opcode == Instruction::ExtractValue);
1558   assert(Opcode == getSameOpcode(VL) && "Invalid opcode");
1559   // Check if all of the extracts come from the same vector and from the
1560   // correct offset.
1561   Value *VL0 = VL[0];
1562   Instruction *E0 = cast<Instruction>(VL0);
1563   Value *Vec = E0->getOperand(0);
1564 
1565   // We have to extract from a vector/aggregate with the same number of elements.
1566   unsigned NElts;
1567   if (Opcode == Instruction::ExtractValue) {
1568     const DataLayout &DL = E0->getModule()->getDataLayout();
1569     NElts = canMapToVector(Vec->getType(), DL);
1570     if (!NElts)
1571       return false;
1572     // Check if load can be rewritten as load of vector.
1573     LoadInst *LI = dyn_cast<LoadInst>(Vec);
1574     if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
1575       return false;
1576   } else {
1577     NElts = Vec->getType()->getVectorNumElements();
1578   }
1579 
1580   if (NElts != VL.size())
1581     return false;
1582 
1583   // Check that all of the indices extract from the correct offset.
1584   if (!matchExtractIndex(E0, 0, Opcode))
1585     return false;
1586 
1587   for (unsigned i = 1, e = VL.size(); i < e; ++i) {
1588     Instruction *E = cast<Instruction>(VL[i]);
1589     if (!matchExtractIndex(E, i, Opcode))
1590       return false;
1591     if (E->getOperand(0) != Vec)
1592       return false;
1593   }
1594 
1595   return true;
1596 }
1597 
1598 int BoUpSLP::getEntryCost(TreeEntry *E) {
1599   ArrayRef<Value*> VL = E->Scalars;
1600 
1601   Type *ScalarTy = VL[0]->getType();
1602   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
1603     ScalarTy = SI->getValueOperand()->getType();
1604   VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
1605 
1606   // If we have computed a smaller type for the expression, update VecTy so
1607   // that the costs will be accurate.
1608   if (MinBWs.count(VL[0]))
1609     VecTy = VectorType::get(
1610         IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size());
1611 
1612   if (E->NeedToGather) {
1613     if (allConstant(VL))
1614       return 0;
1615     if (isSplat(VL)) {
1616       return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
1617     }
1618     return getGatherCost(E->Scalars);
1619   }
1620   unsigned Opcode = getSameOpcode(VL);
1621   assert(Opcode && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
1622   Instruction *VL0 = cast<Instruction>(VL[0]);
1623   switch (Opcode) {
1624     case Instruction::PHI: {
1625       return 0;
1626     }
1627     case Instruction::ExtractValue:
1628     case Instruction::ExtractElement: {
1629       if (canReuseExtract(VL, Opcode)) {
1630         int DeadCost = 0;
1631         for (unsigned i = 0, e = VL.size(); i < e; ++i) {
1632           Instruction *E = cast<Instruction>(VL[i]);
1633           if (E->hasOneUse())
1634             // Take credit for instruction that will become dead.
1635             DeadCost +=
1636                 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
1637         }
1638         return -DeadCost;
1639       }
1640       return getGatherCost(VecTy);
1641     }
1642     case Instruction::ZExt:
1643     case Instruction::SExt:
1644     case Instruction::FPToUI:
1645     case Instruction::FPToSI:
1646     case Instruction::FPExt:
1647     case Instruction::PtrToInt:
1648     case Instruction::IntToPtr:
1649     case Instruction::SIToFP:
1650     case Instruction::UIToFP:
1651     case Instruction::Trunc:
1652     case Instruction::FPTrunc:
1653     case Instruction::BitCast: {
1654       Type *SrcTy = VL0->getOperand(0)->getType();
1655 
1656       // Calculate the cost of this instruction.
1657       int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
1658                                                          VL0->getType(), SrcTy);
1659 
1660       VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
1661       int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
1662       return VecCost - ScalarCost;
1663     }
1664     case Instruction::FCmp:
1665     case Instruction::ICmp:
1666     case Instruction::Select: {
1667       // Calculate the cost of this instruction.
1668       VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
1669       int ScalarCost = VecTy->getNumElements() *
1670           TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
1671       int VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
1672       return VecCost - ScalarCost;
1673     }
1674     case Instruction::Add:
1675     case Instruction::FAdd:
1676     case Instruction::Sub:
1677     case Instruction::FSub:
1678     case Instruction::Mul:
1679     case Instruction::FMul:
1680     case Instruction::UDiv:
1681     case Instruction::SDiv:
1682     case Instruction::FDiv:
1683     case Instruction::URem:
1684     case Instruction::SRem:
1685     case Instruction::FRem:
1686     case Instruction::Shl:
1687     case Instruction::LShr:
1688     case Instruction::AShr:
1689     case Instruction::And:
1690     case Instruction::Or:
1691     case Instruction::Xor: {
1692       // Certain instructions can be cheaper to vectorize if they have a
1693       // constant second vector operand.
1694       TargetTransformInfo::OperandValueKind Op1VK =
1695           TargetTransformInfo::OK_AnyValue;
1696       TargetTransformInfo::OperandValueKind Op2VK =
1697           TargetTransformInfo::OK_UniformConstantValue;
1698       TargetTransformInfo::OperandValueProperties Op1VP =
1699           TargetTransformInfo::OP_None;
1700       TargetTransformInfo::OperandValueProperties Op2VP =
1701           TargetTransformInfo::OP_None;
1702 
1703       // If all operands are exactly the same ConstantInt then set the
1704       // operand kind to OK_UniformConstantValue.
1705       // If instead not all operands are constants, then set the operand kind
1706       // to OK_AnyValue. If all operands are constants but not the same,
1707       // then set the operand kind to OK_NonUniformConstantValue.
1708       ConstantInt *CInt = nullptr;
1709       for (unsigned i = 0; i < VL.size(); ++i) {
1710         const Instruction *I = cast<Instruction>(VL[i]);
1711         if (!isa<ConstantInt>(I->getOperand(1))) {
1712           Op2VK = TargetTransformInfo::OK_AnyValue;
1713           break;
1714         }
1715         if (i == 0) {
1716           CInt = cast<ConstantInt>(I->getOperand(1));
1717           continue;
1718         }
1719         if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
1720             CInt != cast<ConstantInt>(I->getOperand(1)))
1721           Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
1722       }
1723       // FIXME: Currently cost of model modification for division by power of
1724       // 2 is handled for X86 and AArch64. Add support for other targets.
1725       if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt &&
1726           CInt->getValue().isPowerOf2())
1727         Op2VP = TargetTransformInfo::OP_PowerOf2;
1728 
1729       int ScalarCost = VecTy->getNumElements() *
1730                        TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK,
1731                                                    Op2VK, Op1VP, Op2VP);
1732       int VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK,
1733                                                 Op1VP, Op2VP);
1734       return VecCost - ScalarCost;
1735     }
1736     case Instruction::GetElementPtr: {
1737       TargetTransformInfo::OperandValueKind Op1VK =
1738           TargetTransformInfo::OK_AnyValue;
1739       TargetTransformInfo::OperandValueKind Op2VK =
1740           TargetTransformInfo::OK_UniformConstantValue;
1741 
1742       int ScalarCost =
1743           VecTy->getNumElements() *
1744           TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
1745       int VecCost =
1746           TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);
1747 
1748       return VecCost - ScalarCost;
1749     }
1750     case Instruction::Load: {
1751       // Cost of wide load - cost of scalar loads.
1752       unsigned alignment = dyn_cast<LoadInst>(VL0)->getAlignment();
1753       int ScalarLdCost = VecTy->getNumElements() *
1754             TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0);
1755       int VecLdCost = TTI->getMemoryOpCost(Instruction::Load,
1756                                            VecTy, alignment, 0);
1757       if (E->NeedToShuffle) {
1758         VecLdCost += TTI->getShuffleCost(
1759             TargetTransformInfo::SK_PermuteSingleSrc, VecTy, 0);
1760       }
1761       return VecLdCost - ScalarLdCost;
1762     }
1763     case Instruction::Store: {
1764       // We know that we can merge the stores. Calculate the cost.
1765       unsigned alignment = dyn_cast<StoreInst>(VL0)->getAlignment();
1766       int ScalarStCost = VecTy->getNumElements() *
1767             TTI->getMemoryOpCost(Instruction::Store, ScalarTy, alignment, 0);
1768       int VecStCost = TTI->getMemoryOpCost(Instruction::Store,
1769                                            VecTy, alignment, 0);
1770       return VecStCost - ScalarStCost;
1771     }
1772     case Instruction::Call: {
1773       CallInst *CI = cast<CallInst>(VL0);
1774       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
1775 
1776       // Calculate the cost of the scalar and vector calls.
1777       SmallVector<Type*, 4> ScalarTys, VecTys;
1778       for (unsigned op = 0, opc = CI->getNumArgOperands(); op!= opc; ++op) {
1779         ScalarTys.push_back(CI->getArgOperand(op)->getType());
1780         VecTys.push_back(VectorType::get(CI->getArgOperand(op)->getType(),
1781                                          VecTy->getNumElements()));
1782       }
1783 
1784       FastMathFlags FMF;
1785       if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
1786         FMF = FPMO->getFastMathFlags();
1787 
1788       int ScalarCallCost = VecTy->getNumElements() *
1789           TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF);
1790 
1791       int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys, FMF);
1792 
1793       DEBUG(dbgs() << "SLP: Call cost "<< VecCallCost - ScalarCallCost
1794             << " (" << VecCallCost  << "-" <<  ScalarCallCost << ")"
1795             << " for " << *CI << "\n");
1796 
1797       return VecCallCost - ScalarCallCost;
1798     }
1799     case Instruction::ShuffleVector: {
1800       TargetTransformInfo::OperandValueKind Op1VK =
1801           TargetTransformInfo::OK_AnyValue;
1802       TargetTransformInfo::OperandValueKind Op2VK =
1803           TargetTransformInfo::OK_AnyValue;
1804       int ScalarCost = 0;
1805       int VecCost = 0;
1806       for (Value *i : VL) {
1807         Instruction *I = cast<Instruction>(i);
1808         if (!I)
1809           break;
1810         ScalarCost +=
1811             TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
1812       }
1813       // VecCost is equal to sum of the cost of creating 2 vectors
1814       // and the cost of creating shuffle.
1815       Instruction *I0 = cast<Instruction>(VL[0]);
1816       VecCost =
1817           TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK);
1818       Instruction *I1 = cast<Instruction>(VL[1]);
1819       VecCost +=
1820           TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK);
1821       VecCost +=
1822           TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0);
1823       return VecCost - ScalarCost;
1824     }
1825     default:
1826       llvm_unreachable("Unknown instruction");
1827   }
1828 }
1829 
1830 bool BoUpSLP::isFullyVectorizableTinyTree() {
1831   DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
1832         VectorizableTree.size() << " is fully vectorizable .\n");
1833 
1834   // We only handle trees of heights 1 and 2.
1835   if (VectorizableTree.size() == 1 && !VectorizableTree[0].NeedToGather)
1836     return true;
1837 
1838   if (VectorizableTree.size() != 2)
1839     return false;
1840 
1841   // Handle splat and all-constants stores.
1842   if (!VectorizableTree[0].NeedToGather &&
1843       (allConstant(VectorizableTree[1].Scalars) ||
1844        isSplat(VectorizableTree[1].Scalars)))
1845     return true;
1846 
1847   // Gathering cost would be too much for tiny trees.
1848   if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
1849     return false;
1850 
1851   return true;
1852 }
1853 
1854 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() {
1855 
1856   // We can vectorize the tree if its size is greater than or equal to the
1857   // minimum size specified by the MinTreeSize command line option.
1858   if (VectorizableTree.size() >= MinTreeSize)
1859     return false;
1860 
1861   // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
1862   // can vectorize it if we can prove it fully vectorizable.
1863   if (isFullyVectorizableTinyTree())
1864     return false;
1865 
1866   assert(VectorizableTree.empty()
1867              ? ExternalUses.empty()
1868              : true && "We shouldn't have any external users");
1869 
1870   // Otherwise, we can't vectorize the tree. It is both tiny and not fully
1871   // vectorizable.
1872   return true;
1873 }
1874 
1875 int BoUpSLP::getSpillCost() {
1876   // Walk from the bottom of the tree to the top, tracking which values are
1877   // live. When we see a call instruction that is not part of our tree,
1878   // query TTI to see if there is a cost to keeping values live over it
1879   // (for example, if spills and fills are required).
1880   unsigned BundleWidth = VectorizableTree.front().Scalars.size();
1881   int Cost = 0;
1882 
1883   SmallPtrSet<Instruction*, 4> LiveValues;
1884   Instruction *PrevInst = nullptr;
1885 
1886   for (const auto &N : VectorizableTree) {
1887     Instruction *Inst = dyn_cast<Instruction>(N.Scalars[0]);
1888     if (!Inst)
1889       continue;
1890 
1891     if (!PrevInst) {
1892       PrevInst = Inst;
1893       continue;
1894     }
1895 
1896     // Update LiveValues.
1897     LiveValues.erase(PrevInst);
1898     for (auto &J : PrevInst->operands()) {
1899       if (isa<Instruction>(&*J) && ScalarToTreeEntry.count(&*J))
1900         LiveValues.insert(cast<Instruction>(&*J));
1901     }
1902 
1903     DEBUG(
1904       dbgs() << "SLP: #LV: " << LiveValues.size();
1905       for (auto *X : LiveValues)
1906         dbgs() << " " << X->getName();
1907       dbgs() << ", Looking at ";
1908       Inst->dump();
1909       );
1910 
1911     // Now find the sequence of instructions between PrevInst and Inst.
1912     BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
1913                                  PrevInstIt =
1914                                      PrevInst->getIterator().getReverse();
1915     while (InstIt != PrevInstIt) {
1916       if (PrevInstIt == PrevInst->getParent()->rend()) {
1917         PrevInstIt = Inst->getParent()->rbegin();
1918         continue;
1919       }
1920 
1921       if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) {
1922         SmallVector<Type*, 4> V;
1923         for (auto *II : LiveValues)
1924           V.push_back(VectorType::get(II->getType(), BundleWidth));
1925         Cost += TTI->getCostOfKeepingLiveOverCall(V);
1926       }
1927 
1928       ++PrevInstIt;
1929     }
1930 
1931     PrevInst = Inst;
1932   }
1933 
1934   return Cost;
1935 }
1936 
1937 int BoUpSLP::getTreeCost() {
1938   int Cost = 0;
1939   DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
1940         VectorizableTree.size() << ".\n");
1941 
1942   unsigned BundleWidth = VectorizableTree[0].Scalars.size();
1943 
1944   for (TreeEntry &TE : VectorizableTree) {
1945     int C = getEntryCost(&TE);
1946     DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
1947                  << *TE.Scalars[0] << ".\n");
1948     Cost += C;
1949   }
1950 
1951   SmallSet<Value *, 16> ExtractCostCalculated;
1952   int ExtractCost = 0;
1953   for (ExternalUser &EU : ExternalUses) {
1954     // We only add extract cost once for the same scalar.
1955     if (!ExtractCostCalculated.insert(EU.Scalar).second)
1956       continue;
1957 
1958     // Uses by ephemeral values are free (because the ephemeral value will be
1959     // removed prior to code generation, and so the extraction will be
1960     // removed as well).
1961     if (EphValues.count(EU.User))
1962       continue;
1963 
1964     // If we plan to rewrite the tree in a smaller type, we will need to sign
1965     // extend the extracted value back to the original type. Here, we account
1966     // for the extract and the added cost of the sign extend if needed.
1967     auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth);
1968     auto *ScalarRoot = VectorizableTree[0].Scalars[0];
1969     if (MinBWs.count(ScalarRoot)) {
1970       auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
1971       auto Extend =
1972           MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
1973       VecTy = VectorType::get(MinTy, BundleWidth);
1974       ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
1975                                                    VecTy, EU.Lane);
1976     } else {
1977       ExtractCost +=
1978           TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
1979     }
1980   }
1981 
1982   int SpillCost = getSpillCost();
1983   Cost += SpillCost + ExtractCost;
1984 
1985   DEBUG(dbgs() << "SLP: Spill Cost = " << SpillCost << ".\n"
1986                << "SLP: Extract Cost = " << ExtractCost << ".\n"
1987                << "SLP: Total Cost = " << Cost << ".\n");
1988   return Cost;
1989 }
1990 
1991 int BoUpSLP::getGatherCost(Type *Ty) {
1992   int Cost = 0;
1993   for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
1994     Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
1995   return Cost;
1996 }
1997 
1998 int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
1999   // Find the type of the operands in VL.
2000   Type *ScalarTy = VL[0]->getType();
2001   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
2002     ScalarTy = SI->getValueOperand()->getType();
2003   VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
2004   // Find the cost of inserting/extracting values from the vector.
2005   return getGatherCost(VecTy);
2006 }
2007 
2008 // Reorder commutative operations in alternate shuffle if the resulting vectors
2009 // are consecutive loads. This would allow us to vectorize the tree.
2010 // If we have something like-
2011 // load a[0] - load b[0]
2012 // load b[1] + load a[1]
2013 // load a[2] - load b[2]
2014 // load a[3] + load b[3]
2015 // Reordering the second load b[1]  load a[1] would allow us to vectorize this
2016 // code.
2017 void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL,
2018                                         SmallVectorImpl<Value *> &Left,
2019                                         SmallVectorImpl<Value *> &Right) {
2020   // Push left and right operands of binary operation into Left and Right
2021   for (Value *i : VL) {
2022     Left.push_back(cast<Instruction>(i)->getOperand(0));
2023     Right.push_back(cast<Instruction>(i)->getOperand(1));
2024   }
2025 
2026   // Reorder if we have a commutative operation and consecutive access
2027   // are on either side of the alternate instructions.
2028   for (unsigned j = 0; j < VL.size() - 1; ++j) {
2029     if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
2030       if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
2031         Instruction *VL1 = cast<Instruction>(VL[j]);
2032         Instruction *VL2 = cast<Instruction>(VL[j + 1]);
2033         if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
2034           std::swap(Left[j], Right[j]);
2035           continue;
2036         } else if (VL2->isCommutative() &&
2037                    isConsecutiveAccess(L, L1, *DL, *SE)) {
2038           std::swap(Left[j + 1], Right[j + 1]);
2039           continue;
2040         }
2041         // else unchanged
2042       }
2043     }
2044     if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
2045       if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
2046         Instruction *VL1 = cast<Instruction>(VL[j]);
2047         Instruction *VL2 = cast<Instruction>(VL[j + 1]);
2048         if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
2049           std::swap(Left[j], Right[j]);
2050           continue;
2051         } else if (VL2->isCommutative() &&
2052                    isConsecutiveAccess(L, L1, *DL, *SE)) {
2053           std::swap(Left[j + 1], Right[j + 1]);
2054           continue;
2055         }
2056         // else unchanged
2057       }
2058     }
2059   }
2060 }
2061 
2062 // Return true if I should be commuted before adding it's left and right
2063 // operands to the arrays Left and Right.
2064 //
2065 // The vectorizer is trying to either have all elements one side being
2066 // instruction with the same opcode to enable further vectorization, or having
2067 // a splat to lower the vectorizing cost.
2068 static bool shouldReorderOperands(int i, Instruction &I,
2069                                   SmallVectorImpl<Value *> &Left,
2070                                   SmallVectorImpl<Value *> &Right,
2071                                   bool AllSameOpcodeLeft,
2072                                   bool AllSameOpcodeRight, bool SplatLeft,
2073                                   bool SplatRight) {
2074   Value *VLeft = I.getOperand(0);
2075   Value *VRight = I.getOperand(1);
2076   // If we have "SplatRight", try to see if commuting is needed to preserve it.
2077   if (SplatRight) {
2078     if (VRight == Right[i - 1])
2079       // Preserve SplatRight
2080       return false;
2081     if (VLeft == Right[i - 1]) {
2082       // Commuting would preserve SplatRight, but we don't want to break
2083       // SplatLeft either, i.e. preserve the original order if possible.
2084       // (FIXME: why do we care?)
2085       if (SplatLeft && VLeft == Left[i - 1])
2086         return false;
2087       return true;
2088     }
2089   }
2090   // Symmetrically handle Right side.
2091   if (SplatLeft) {
2092     if (VLeft == Left[i - 1])
2093       // Preserve SplatLeft
2094       return false;
2095     if (VRight == Left[i - 1])
2096       return true;
2097   }
2098 
2099   Instruction *ILeft = dyn_cast<Instruction>(VLeft);
2100   Instruction *IRight = dyn_cast<Instruction>(VRight);
2101 
2102   // If we have "AllSameOpcodeRight", try to see if the left operands preserves
2103   // it and not the right, in this case we want to commute.
2104   if (AllSameOpcodeRight) {
2105     unsigned RightPrevOpcode = cast<Instruction>(Right[i - 1])->getOpcode();
2106     if (IRight && RightPrevOpcode == IRight->getOpcode())
2107       // Do not commute, a match on the right preserves AllSameOpcodeRight
2108       return false;
2109     if (ILeft && RightPrevOpcode == ILeft->getOpcode()) {
2110       // We have a match and may want to commute, but first check if there is
2111       // not also a match on the existing operands on the Left to preserve
2112       // AllSameOpcodeLeft, i.e. preserve the original order if possible.
2113       // (FIXME: why do we care?)
2114       if (AllSameOpcodeLeft && ILeft &&
2115           cast<Instruction>(Left[i - 1])->getOpcode() == ILeft->getOpcode())
2116         return false;
2117       return true;
2118     }
2119   }
2120   // Symmetrically handle Left side.
2121   if (AllSameOpcodeLeft) {
2122     unsigned LeftPrevOpcode = cast<Instruction>(Left[i - 1])->getOpcode();
2123     if (ILeft && LeftPrevOpcode == ILeft->getOpcode())
2124       return false;
2125     if (IRight && LeftPrevOpcode == IRight->getOpcode())
2126       return true;
2127   }
2128   return false;
2129 }
2130 
2131 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
2132                                              SmallVectorImpl<Value *> &Left,
2133                                              SmallVectorImpl<Value *> &Right) {
2134 
2135   if (VL.size()) {
2136     // Peel the first iteration out of the loop since there's nothing
2137     // interesting to do anyway and it simplifies the checks in the loop.
2138     auto VLeft = cast<Instruction>(VL[0])->getOperand(0);
2139     auto VRight = cast<Instruction>(VL[0])->getOperand(1);
2140     if (!isa<Instruction>(VRight) && isa<Instruction>(VLeft))
2141       // Favor having instruction to the right. FIXME: why?
2142       std::swap(VLeft, VRight);
2143     Left.push_back(VLeft);
2144     Right.push_back(VRight);
2145   }
2146 
2147   // Keep track if we have instructions with all the same opcode on one side.
2148   bool AllSameOpcodeLeft = isa<Instruction>(Left[0]);
2149   bool AllSameOpcodeRight = isa<Instruction>(Right[0]);
2150   // Keep track if we have one side with all the same value (broadcast).
2151   bool SplatLeft = true;
2152   bool SplatRight = true;
2153 
2154   for (unsigned i = 1, e = VL.size(); i != e; ++i) {
2155     Instruction *I = cast<Instruction>(VL[i]);
2156     assert(I->isCommutative() && "Can only process commutative instruction");
2157     // Commute to favor either a splat or maximizing having the same opcodes on
2158     // one side.
2159     if (shouldReorderOperands(i, *I, Left, Right, AllSameOpcodeLeft,
2160                               AllSameOpcodeRight, SplatLeft, SplatRight)) {
2161       Left.push_back(I->getOperand(1));
2162       Right.push_back(I->getOperand(0));
2163     } else {
2164       Left.push_back(I->getOperand(0));
2165       Right.push_back(I->getOperand(1));
2166     }
2167     // Update Splat* and AllSameOpcode* after the insertion.
2168     SplatRight = SplatRight && (Right[i - 1] == Right[i]);
2169     SplatLeft = SplatLeft && (Left[i - 1] == Left[i]);
2170     AllSameOpcodeLeft = AllSameOpcodeLeft && isa<Instruction>(Left[i]) &&
2171                         (cast<Instruction>(Left[i - 1])->getOpcode() ==
2172                          cast<Instruction>(Left[i])->getOpcode());
2173     AllSameOpcodeRight = AllSameOpcodeRight && isa<Instruction>(Right[i]) &&
2174                          (cast<Instruction>(Right[i - 1])->getOpcode() ==
2175                           cast<Instruction>(Right[i])->getOpcode());
2176   }
2177 
2178   // If one operand end up being broadcast, return this operand order.
2179   if (SplatRight || SplatLeft)
2180     return;
2181 
2182   // Finally check if we can get longer vectorizable chain by reordering
2183   // without breaking the good operand order detected above.
2184   // E.g. If we have something like-
2185   // load a[0]  load b[0]
2186   // load b[1]  load a[1]
2187   // load a[2]  load b[2]
2188   // load a[3]  load b[3]
2189   // Reordering the second load b[1]  load a[1] would allow us to vectorize
2190   // this code and we still retain AllSameOpcode property.
2191   // FIXME: This load reordering might break AllSameOpcode in some rare cases
2192   // such as-
2193   // add a[0],c[0]  load b[0]
2194   // add a[1],c[2]  load b[1]
2195   // b[2]           load b[2]
2196   // add a[3],c[3]  load b[3]
2197   for (unsigned j = 0; j < VL.size() - 1; ++j) {
2198     if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
2199       if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
2200         if (isConsecutiveAccess(L, L1, *DL, *SE)) {
2201           std::swap(Left[j + 1], Right[j + 1]);
2202           continue;
2203         }
2204       }
2205     }
2206     if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
2207       if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
2208         if (isConsecutiveAccess(L, L1, *DL, *SE)) {
2209           std::swap(Left[j + 1], Right[j + 1]);
2210           continue;
2211         }
2212       }
2213     }
2214     // else unchanged
2215   }
2216 }
2217 
2218 void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
2219 
2220   // Get the basic block this bundle is in. All instructions in the bundle
2221   // should be in this block.
2222   auto *Front = cast<Instruction>(VL.front());
2223   auto *BB = Front->getParent();
2224   assert(all_of(make_range(VL.begin(), VL.end()), [&](Value *V) -> bool {
2225     return cast<Instruction>(V)->getParent() == BB;
2226   }));
2227 
2228   // The last instruction in the bundle in program order.
2229   Instruction *LastInst = nullptr;
2230 
2231   // Find the last instruction. The common case should be that BB has been
2232   // scheduled, and the last instruction is VL.back(). So we start with
2233   // VL.back() and iterate over schedule data until we reach the end of the
2234   // bundle. The end of the bundle is marked by null ScheduleData.
2235   if (BlocksSchedules.count(BB)) {
2236     auto *Bundle = BlocksSchedules[BB]->getScheduleData(VL.back());
2237     if (Bundle && Bundle->isPartOfBundle())
2238       for (; Bundle; Bundle = Bundle->NextInBundle)
2239         LastInst = Bundle->Inst;
2240   }
2241 
2242   // LastInst can still be null at this point if there's either not an entry
2243   // for BB in BlocksSchedules or there's no ScheduleData available for
2244   // VL.back(). This can be the case if buildTree_rec aborts for various
2245   // reasons (e.g., the maximum recursion depth is reached, the maximum region
2246   // size is reached, etc.). ScheduleData is initialized in the scheduling
2247   // "dry-run".
2248   //
2249   // If this happens, we can still find the last instruction by brute force. We
2250   // iterate forwards from Front (inclusive) until we either see all
2251   // instructions in the bundle or reach the end of the block. If Front is the
2252   // last instruction in program order, LastInst will be set to Front, and we
2253   // will visit all the remaining instructions in the block.
2254   //
2255   // One of the reasons we exit early from buildTree_rec is to place an upper
2256   // bound on compile-time. Thus, taking an additional compile-time hit here is
2257   // not ideal. However, this should be exceedingly rare since it requires that
2258   // we both exit early from buildTree_rec and that the bundle be out-of-order
2259   // (causing us to iterate all the way to the end of the block).
2260   if (!LastInst) {
2261     SmallPtrSet<Value *, 16> Bundle(VL.begin(), VL.end());
2262     for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
2263       if (Bundle.erase(&I))
2264         LastInst = &I;
2265       if (Bundle.empty())
2266         break;
2267     }
2268   }
2269 
2270   // Set the insertion point after the last instruction in the bundle. Set the
2271   // debug location to Front.
2272   Builder.SetInsertPoint(BB, ++LastInst->getIterator());
2273   Builder.SetCurrentDebugLocation(Front->getDebugLoc());
2274 }
2275 
2276 Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
2277   Value *Vec = UndefValue::get(Ty);
2278   // Generate the 'InsertElement' instruction.
2279   for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
2280     Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
2281     if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
2282       GatherSeq.insert(Insrt);
2283       CSEBlocks.insert(Insrt->getParent());
2284 
2285       // Add to our 'need-to-extract' list.
2286       if (ScalarToTreeEntry.count(VL[i])) {
2287         int Idx = ScalarToTreeEntry[VL[i]];
2288         TreeEntry *E = &VectorizableTree[Idx];
2289         // Find which lane we need to extract.
2290         int FoundLane = -1;
2291         for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
2292           // Is this the lane of the scalar that we are looking for ?
2293           if (E->Scalars[Lane] == VL[i]) {
2294             FoundLane = Lane;
2295             break;
2296           }
2297         }
2298         assert(FoundLane >= 0 && "Could not find the correct lane");
2299         ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
2300       }
2301     }
2302   }
2303 
2304   return Vec;
2305 }
2306 
2307 Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
2308   SmallDenseMap<Value*, int>::const_iterator Entry
2309     = ScalarToTreeEntry.find(VL[0]);
2310   if (Entry != ScalarToTreeEntry.end()) {
2311     int Idx = Entry->second;
2312     const TreeEntry *En = &VectorizableTree[Idx];
2313     if (En->isSame(VL) && En->VectorizedValue)
2314       return En->VectorizedValue;
2315   }
2316   return nullptr;
2317 }
2318 
2319 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
2320   if (ScalarToTreeEntry.count(VL[0])) {
2321     int Idx = ScalarToTreeEntry[VL[0]];
2322     TreeEntry *E = &VectorizableTree[Idx];
2323     if (E->isSame(VL) || (E->NeedToShuffle && E->isFoundJumbled(VL, *DL, *SE)))
2324       return vectorizeTree(VL, E);
2325   }
2326 
2327   Type *ScalarTy = VL[0]->getType();
2328   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
2329     ScalarTy = SI->getValueOperand()->getType();
2330   VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
2331 
2332   return Gather(VL, VecTy);
2333 }
2334 
2335 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL, TreeEntry *E) {
2336   IRBuilder<>::InsertPointGuard Guard(Builder);
2337 
2338   if (E->VectorizedValue && !E->NeedToShuffle) {
2339     DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
2340     return E->VectorizedValue;
2341   }
2342 
2343   Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
2344   Type *ScalarTy = VL0->getType();
2345   if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
2346     ScalarTy = SI->getValueOperand()->getType();
2347   VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());
2348 
2349   if (E->NeedToGather) {
2350     setInsertPointAfterBundle(E->Scalars);
2351     auto *V = Gather(E->Scalars, VecTy);
2352     E->VectorizedValue = V;
2353     return V;
2354   }
2355 
2356   unsigned Opcode = getSameOpcode(E->Scalars);
2357 
2358   switch (Opcode) {
2359     case Instruction::PHI: {
2360       PHINode *PH = dyn_cast<PHINode>(VL0);
2361       Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
2362       Builder.SetCurrentDebugLocation(PH->getDebugLoc());
2363       PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
2364       E->VectorizedValue = NewPhi;
2365 
2366       // PHINodes may have multiple entries from the same block. We want to
2367       // visit every block once.
2368       SmallSet<BasicBlock*, 4> VisitedBBs;
2369 
2370       for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
2371         ValueList Operands;
2372         BasicBlock *IBB = PH->getIncomingBlock(i);
2373 
2374         if (!VisitedBBs.insert(IBB).second) {
2375           NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
2376           continue;
2377         }
2378 
2379         // Prepare the operand vector.
2380         for (Value *V : E->Scalars)
2381           Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(IBB));
2382 
2383         Builder.SetInsertPoint(IBB->getTerminator());
2384         Builder.SetCurrentDebugLocation(PH->getDebugLoc());
2385         Value *Vec = vectorizeTree(Operands);
2386         NewPhi->addIncoming(Vec, IBB);
2387       }
2388 
2389       assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
2390              "Invalid number of incoming values");
2391       return NewPhi;
2392     }
2393 
2394     case Instruction::ExtractElement: {
2395       if (canReuseExtract(E->Scalars, Instruction::ExtractElement)) {
2396         Value *V = VL0->getOperand(0);
2397         E->VectorizedValue = V;
2398         return V;
2399       }
2400       setInsertPointAfterBundle(E->Scalars);
2401       auto *V = Gather(E->Scalars, VecTy);
2402       E->VectorizedValue = V;
2403       return V;
2404     }
2405     case Instruction::ExtractValue: {
2406       if (canReuseExtract(E->Scalars, Instruction::ExtractValue)) {
2407         LoadInst *LI = cast<LoadInst>(VL0->getOperand(0));
2408         Builder.SetInsertPoint(LI);
2409         PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
2410         Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
2411         LoadInst *V = Builder.CreateAlignedLoad(Ptr, LI->getAlignment());
2412         E->VectorizedValue = V;
2413         return propagateMetadata(V, E->Scalars);
2414       }
2415       setInsertPointAfterBundle(E->Scalars);
2416       auto *V = Gather(E->Scalars, VecTy);
2417       E->VectorizedValue = V;
2418       return V;
2419     }
2420     case Instruction::ZExt:
2421     case Instruction::SExt:
2422     case Instruction::FPToUI:
2423     case Instruction::FPToSI:
2424     case Instruction::FPExt:
2425     case Instruction::PtrToInt:
2426     case Instruction::IntToPtr:
2427     case Instruction::SIToFP:
2428     case Instruction::UIToFP:
2429     case Instruction::Trunc:
2430     case Instruction::FPTrunc:
2431     case Instruction::BitCast: {
2432       ValueList INVL;
2433       for (Value *V : E->Scalars)
2434         INVL.push_back(cast<Instruction>(V)->getOperand(0));
2435 
2436       setInsertPointAfterBundle(E->Scalars);
2437 
2438       Value *InVec = vectorizeTree(INVL);
2439 
2440       if (Value *V = alreadyVectorized(E->Scalars))
2441         return V;
2442 
2443       CastInst *CI = dyn_cast<CastInst>(VL0);
2444       Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
2445       E->VectorizedValue = V;
2446       ++NumVectorInstructions;
2447       return V;
2448     }
2449     case Instruction::FCmp:
2450     case Instruction::ICmp: {
2451       ValueList LHSV, RHSV;
2452       for (Value *V : E->Scalars) {
2453         LHSV.push_back(cast<Instruction>(V)->getOperand(0));
2454         RHSV.push_back(cast<Instruction>(V)->getOperand(1));
2455       }
2456 
2457       setInsertPointAfterBundle(E->Scalars);
2458 
2459       Value *L = vectorizeTree(LHSV);
2460       Value *R = vectorizeTree(RHSV);
2461 
2462       if (Value *V = alreadyVectorized(E->Scalars))
2463         return V;
2464 
2465       CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
2466       Value *V;
2467       if (Opcode == Instruction::FCmp)
2468         V = Builder.CreateFCmp(P0, L, R);
2469       else
2470         V = Builder.CreateICmp(P0, L, R);
2471 
2472       E->VectorizedValue = V;
2473       propagateIRFlags(E->VectorizedValue, E->Scalars);
2474       ++NumVectorInstructions;
2475       return V;
2476     }
2477     case Instruction::Select: {
2478       ValueList TrueVec, FalseVec, CondVec;
2479       for (Value *V : E->Scalars) {
2480         CondVec.push_back(cast<Instruction>(V)->getOperand(0));
2481         TrueVec.push_back(cast<Instruction>(V)->getOperand(1));
2482         FalseVec.push_back(cast<Instruction>(V)->getOperand(2));
2483       }
2484 
2485       setInsertPointAfterBundle(E->Scalars);
2486 
2487       Value *Cond = vectorizeTree(CondVec);
2488       Value *True = vectorizeTree(TrueVec);
2489       Value *False = vectorizeTree(FalseVec);
2490 
2491       if (Value *V = alreadyVectorized(E->Scalars))
2492         return V;
2493 
2494       Value *V = Builder.CreateSelect(Cond, True, False);
2495       E->VectorizedValue = V;
2496       ++NumVectorInstructions;
2497       return V;
2498     }
2499     case Instruction::Add:
2500     case Instruction::FAdd:
2501     case Instruction::Sub:
2502     case Instruction::FSub:
2503     case Instruction::Mul:
2504     case Instruction::FMul:
2505     case Instruction::UDiv:
2506     case Instruction::SDiv:
2507     case Instruction::FDiv:
2508     case Instruction::URem:
2509     case Instruction::SRem:
2510     case Instruction::FRem:
2511     case Instruction::Shl:
2512     case Instruction::LShr:
2513     case Instruction::AShr:
2514     case Instruction::And:
2515     case Instruction::Or:
2516     case Instruction::Xor: {
2517       ValueList LHSVL, RHSVL;
2518       if (isa<BinaryOperator>(VL0) && VL0->isCommutative())
2519         reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL);
2520       else
2521         for (Value *V : E->Scalars) {
2522           LHSVL.push_back(cast<Instruction>(V)->getOperand(0));
2523           RHSVL.push_back(cast<Instruction>(V)->getOperand(1));
2524         }
2525 
2526       setInsertPointAfterBundle(E->Scalars);
2527 
2528       Value *LHS = vectorizeTree(LHSVL);
2529       Value *RHS = vectorizeTree(RHSVL);
2530 
2531       if (Value *V = alreadyVectorized(E->Scalars))
2532         return V;
2533 
2534       BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
2535       Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
2536       E->VectorizedValue = V;
2537       propagateIRFlags(E->VectorizedValue, E->Scalars);
2538       ++NumVectorInstructions;
2539 
2540       if (Instruction *I = dyn_cast<Instruction>(V))
2541         return propagateMetadata(I, E->Scalars);
2542 
2543       return V;
2544     }
2545     case Instruction::Load: {
2546       // Loads are inserted at the head of the tree because we don't want to
2547       // sink them all the way down past store instructions.
2548       setInsertPointAfterBundle(E->Scalars);
2549 
2550       LoadInst *LI = cast<LoadInst>(VL0);
2551       Type *ScalarLoadTy = LI->getType();
2552       unsigned AS = LI->getPointerAddressSpace();
2553 
2554       Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
2555                                             VecTy->getPointerTo(AS));
2556 
2557       // The pointer operand uses an in-tree scalar so we add the new BitCast to
2558       // ExternalUses list to make sure that an extract will be generated in the
2559       // future.
2560       if (ScalarToTreeEntry.count(LI->getPointerOperand()))
2561         ExternalUses.push_back(
2562             ExternalUser(LI->getPointerOperand(), cast<User>(VecPtr), 0));
2563 
2564       unsigned Alignment = LI->getAlignment();
2565       LI = Builder.CreateLoad(VecPtr);
2566       if (!Alignment) {
2567         Alignment = DL->getABITypeAlignment(ScalarLoadTy);
2568       }
2569       LI->setAlignment(Alignment);
2570       E->VectorizedValue = LI;
2571       ++NumVectorInstructions;
2572       propagateMetadata(LI, E->Scalars);
2573 
2574       // As program order of scalar loads are jumbled, the vectorized 'load'
2575       // must be followed by a 'shuffle' with the required jumbled mask.
2576       if (!VL.empty() && (E->NeedToShuffle)) {
2577         assert(VL.size() == E->Scalars.size() &&
2578                "Equal number of scalars expected");
2579         SmallVector<Constant *, 8> Mask;
2580         for (Value *Val : VL) {
2581           if (ScalarToTreeEntry.count(Val)) {
2582             int Idx = ScalarToTreeEntry[Val];
2583             TreeEntry *E = &VectorizableTree[Idx];
2584             for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
2585               if (E->Scalars[Lane] == Val) {
2586                 Mask.push_back(Builder.getInt32(Lane));
2587                 break;
2588               }
2589             }
2590           }
2591         }
2592 
2593         // Generate shuffle for jumbled memory access
2594         Value *Undef = UndefValue::get(VecTy);
2595         Value *Shuf = Builder.CreateShuffleVector((Value *)LI, Undef,
2596                                                   ConstantVector::get(Mask));
2597         return Shuf;
2598       }
2599 
2600       return LI;
2601     }
2602     case Instruction::Store: {
2603       StoreInst *SI = cast<StoreInst>(VL0);
2604       unsigned Alignment = SI->getAlignment();
2605       unsigned AS = SI->getPointerAddressSpace();
2606 
2607       ValueList ValueOp;
2608       for (Value *V : E->Scalars)
2609         ValueOp.push_back(cast<StoreInst>(V)->getValueOperand());
2610 
2611       setInsertPointAfterBundle(E->Scalars);
2612 
2613       Value *VecValue = vectorizeTree(ValueOp);
2614       Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
2615                                             VecTy->getPointerTo(AS));
2616       StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
2617 
2618       // The pointer operand uses an in-tree scalar so we add the new BitCast to
2619       // ExternalUses list to make sure that an extract will be generated in the
2620       // future.
2621       if (ScalarToTreeEntry.count(SI->getPointerOperand()))
2622         ExternalUses.push_back(
2623             ExternalUser(SI->getPointerOperand(), cast<User>(VecPtr), 0));
2624 
2625       if (!Alignment) {
2626         Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType());
2627       }
2628       S->setAlignment(Alignment);
2629       E->VectorizedValue = S;
2630       ++NumVectorInstructions;
2631       return propagateMetadata(S, E->Scalars);
2632     }
2633     case Instruction::GetElementPtr: {
2634       setInsertPointAfterBundle(E->Scalars);
2635 
2636       ValueList Op0VL;
2637       for (Value *V : E->Scalars)
2638         Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0));
2639 
2640       Value *Op0 = vectorizeTree(Op0VL);
2641 
2642       std::vector<Value *> OpVecs;
2643       for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
2644            ++j) {
2645         ValueList OpVL;
2646         for (Value *V : E->Scalars)
2647           OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j));
2648 
2649         Value *OpVec = vectorizeTree(OpVL);
2650         OpVecs.push_back(OpVec);
2651       }
2652 
2653       Value *V = Builder.CreateGEP(
2654           cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs);
2655       E->VectorizedValue = V;
2656       ++NumVectorInstructions;
2657 
2658       if (Instruction *I = dyn_cast<Instruction>(V))
2659         return propagateMetadata(I, E->Scalars);
2660 
2661       return V;
2662     }
2663     case Instruction::Call: {
2664       CallInst *CI = cast<CallInst>(VL0);
2665       setInsertPointAfterBundle(E->Scalars);
2666       Function *FI;
2667       Intrinsic::ID IID  = Intrinsic::not_intrinsic;
2668       Value *ScalarArg = nullptr;
2669       if (CI && (FI = CI->getCalledFunction())) {
2670         IID = FI->getIntrinsicID();
2671       }
2672       std::vector<Value *> OpVecs;
2673       for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
2674         ValueList OpVL;
2675         // ctlz,cttz and powi are special intrinsics whose second argument is
2676         // a scalar. This argument should not be vectorized.
2677         if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) {
2678           CallInst *CEI = cast<CallInst>(E->Scalars[0]);
2679           ScalarArg = CEI->getArgOperand(j);
2680           OpVecs.push_back(CEI->getArgOperand(j));
2681           continue;
2682         }
2683         for (Value *V : E->Scalars) {
2684           CallInst *CEI = cast<CallInst>(V);
2685           OpVL.push_back(CEI->getArgOperand(j));
2686         }
2687 
2688         Value *OpVec = vectorizeTree(OpVL);
2689         DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
2690         OpVecs.push_back(OpVec);
2691       }
2692 
2693       Module *M = F->getParent();
2694       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
2695       Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) };
2696       Function *CF = Intrinsic::getDeclaration(M, ID, Tys);
2697       SmallVector<OperandBundleDef, 1> OpBundles;
2698       CI->getOperandBundlesAsDefs(OpBundles);
2699       Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);
2700 
2701       // The scalar argument uses an in-tree scalar so we add the new vectorized
2702       // call to ExternalUses list to make sure that an extract will be
2703       // generated in the future.
2704       if (ScalarArg && ScalarToTreeEntry.count(ScalarArg))
2705         ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));
2706 
2707       E->VectorizedValue = V;
2708       propagateIRFlags(E->VectorizedValue, E->Scalars);
2709       ++NumVectorInstructions;
2710       return V;
2711     }
2712     case Instruction::ShuffleVector: {
2713       ValueList LHSVL, RHSVL;
2714       assert(isa<BinaryOperator>(VL0) && "Invalid Shuffle Vector Operand");
2715       reorderAltShuffleOperands(E->Scalars, LHSVL, RHSVL);
2716       setInsertPointAfterBundle(E->Scalars);
2717 
2718       Value *LHS = vectorizeTree(LHSVL);
2719       Value *RHS = vectorizeTree(RHSVL);
2720 
2721       if (Value *V = alreadyVectorized(E->Scalars))
2722         return V;
2723 
2724       // Create a vector of LHS op1 RHS
2725       BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0);
2726       Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS);
2727 
2728       // Create a vector of LHS op2 RHS
2729       Instruction *VL1 = cast<Instruction>(E->Scalars[1]);
2730       BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1);
2731       Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS);
2732 
2733       // Create shuffle to take alternate operations from the vector.
2734       // Also, gather up odd and even scalar ops to propagate IR flags to
2735       // each vector operation.
2736       ValueList OddScalars, EvenScalars;
2737       unsigned e = E->Scalars.size();
2738       SmallVector<Constant *, 8> Mask(e);
2739       for (unsigned i = 0; i < e; ++i) {
2740         if (i & 1) {
2741           Mask[i] = Builder.getInt32(e + i);
2742           OddScalars.push_back(E->Scalars[i]);
2743         } else {
2744           Mask[i] = Builder.getInt32(i);
2745           EvenScalars.push_back(E->Scalars[i]);
2746         }
2747       }
2748 
2749       Value *ShuffleMask = ConstantVector::get(Mask);
2750       propagateIRFlags(V0, EvenScalars);
2751       propagateIRFlags(V1, OddScalars);
2752 
2753       Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
2754       E->VectorizedValue = V;
2755       ++NumVectorInstructions;
2756       if (Instruction *I = dyn_cast<Instruction>(V))
2757         return propagateMetadata(I, E->Scalars);
2758 
2759       return V;
2760     }
2761     default:
2762     llvm_unreachable("unknown inst");
2763   }
2764   return nullptr;
2765 }
2766 
2767 Value *BoUpSLP::vectorizeTree() {
2768 
2769   // All blocks must be scheduled before any instructions are inserted.
2770   for (auto &BSIter : BlocksSchedules) {
2771     scheduleBlock(BSIter.second.get());
2772   }
2773 
2774   Builder.SetInsertPoint(&F->getEntryBlock().front());
2775   auto *VectorRoot = vectorizeTree(ArrayRef<Value *>(), &VectorizableTree[0]);
2776 
2777   // If the vectorized tree can be rewritten in a smaller type, we truncate the
2778   // vectorized root. InstCombine will then rewrite the entire expression. We
2779   // sign extend the extracted values below.
2780   auto *ScalarRoot = VectorizableTree[0].Scalars[0];
2781   if (MinBWs.count(ScalarRoot)) {
2782     if (auto *I = dyn_cast<Instruction>(VectorRoot))
2783       Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
2784     auto BundleWidth = VectorizableTree[0].Scalars.size();
2785     auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
2786     auto *VecTy = VectorType::get(MinTy, BundleWidth);
2787     auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
2788     VectorizableTree[0].VectorizedValue = Trunc;
2789   }
2790 
2791   DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values .\n");
2792 
2793   // If necessary, sign-extend or zero-extend ScalarRoot to the larger type
2794   // specified by ScalarType.
2795   auto extend = [&](Value *ScalarRoot, Value *Ex, Type *ScalarType) {
2796     if (!MinBWs.count(ScalarRoot))
2797       return Ex;
2798     if (MinBWs[ScalarRoot].second)
2799       return Builder.CreateSExt(Ex, ScalarType);
2800     return Builder.CreateZExt(Ex, ScalarType);
2801   };
2802 
2803   // Extract all of the elements with the external uses.
2804   for (const auto &ExternalUse : ExternalUses) {
2805     Value *Scalar = ExternalUse.Scalar;
2806     llvm::User *User = ExternalUse.User;
2807 
2808     // Skip users that we already RAUW. This happens when one instruction
2809     // has multiple uses of the same value.
2810     if (!is_contained(Scalar->users(), User))
2811       continue;
2812     assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");
2813 
2814     int Idx = ScalarToTreeEntry[Scalar];
2815     TreeEntry *E = &VectorizableTree[Idx];
2816     assert(!E->NeedToGather && "Extracting from a gather list");
2817 
2818     Value *Vec = E->VectorizedValue;
2819     assert(Vec && "Can't find vectorizable value");
2820 
2821     Value *Lane = Builder.getInt32(ExternalUse.Lane);
2822     // Generate extracts for out-of-tree users.
2823     // Find the insertion point for the extractelement lane.
2824     if (auto *VecI = dyn_cast<Instruction>(Vec)) {
2825       if (PHINode *PH = dyn_cast<PHINode>(User)) {
2826         for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
2827           if (PH->getIncomingValue(i) == Scalar) {
2828             TerminatorInst *IncomingTerminator =
2829                 PH->getIncomingBlock(i)->getTerminator();
2830             if (isa<CatchSwitchInst>(IncomingTerminator)) {
2831               Builder.SetInsertPoint(VecI->getParent(),
2832                                      std::next(VecI->getIterator()));
2833             } else {
2834               Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
2835             }
2836             Value *Ex = Builder.CreateExtractElement(Vec, Lane);
2837             Ex = extend(ScalarRoot, Ex, Scalar->getType());
2838             CSEBlocks.insert(PH->getIncomingBlock(i));
2839             PH->setOperand(i, Ex);
2840           }
2841         }
2842       } else {
2843         Builder.SetInsertPoint(cast<Instruction>(User));
2844         Value *Ex = Builder.CreateExtractElement(Vec, Lane);
2845         Ex = extend(ScalarRoot, Ex, Scalar->getType());
2846         CSEBlocks.insert(cast<Instruction>(User)->getParent());
2847         User->replaceUsesOfWith(Scalar, Ex);
2848      }
2849     } else {
2850       Builder.SetInsertPoint(&F->getEntryBlock().front());
2851       Value *Ex = Builder.CreateExtractElement(Vec, Lane);
2852       Ex = extend(ScalarRoot, Ex, Scalar->getType());
2853       CSEBlocks.insert(&F->getEntryBlock());
2854       User->replaceUsesOfWith(Scalar, Ex);
2855     }
2856 
2857     DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
2858   }
2859 
2860   // For each vectorized value:
2861   for (TreeEntry &EIdx : VectorizableTree) {
2862     TreeEntry *Entry = &EIdx;
2863 
2864     // For each lane:
2865     for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
2866       Value *Scalar = Entry->Scalars[Lane];
2867       // No need to handle users of gathered values.
2868       if (Entry->NeedToGather)
2869         continue;
2870 
2871       assert(Entry->VectorizedValue && "Can't find vectorizable value");
2872 
2873       Type *Ty = Scalar->getType();
2874       if (!Ty->isVoidTy()) {
2875 #ifndef NDEBUG
2876         for (User *U : Scalar->users()) {
2877           DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
2878 
2879           assert((ScalarToTreeEntry.count(U) ||
2880                   // It is legal to replace users in the ignorelist by undef.
2881                   is_contained(UserIgnoreList, U)) &&
2882                  "Replacing out-of-tree value with undef");
2883         }
2884 #endif
2885         Value *Undef = UndefValue::get(Ty);
2886         Scalar->replaceAllUsesWith(Undef);
2887       }
2888       DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
2889       eraseInstruction(cast<Instruction>(Scalar));
2890     }
2891   }
2892 
2893   Builder.ClearInsertionPoint();
2894 
2895   return VectorizableTree[0].VectorizedValue;
2896 }
2897 
2898 void BoUpSLP::optimizeGatherSequence() {
2899   DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
2900         << " gather sequences instructions.\n");
2901   // LICM InsertElementInst sequences.
2902   for (Instruction *it : GatherSeq) {
2903     InsertElementInst *Insert = dyn_cast<InsertElementInst>(it);
2904 
2905     if (!Insert)
2906       continue;
2907 
2908     // Check if this block is inside a loop.
2909     Loop *L = LI->getLoopFor(Insert->getParent());
2910     if (!L)
2911       continue;
2912 
2913     // Check if it has a preheader.
2914     BasicBlock *PreHeader = L->getLoopPreheader();
2915     if (!PreHeader)
2916       continue;
2917 
2918     // If the vector or the element that we insert into it are
2919     // instructions that are defined in this basic block then we can't
2920     // hoist this instruction.
2921     Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
2922     Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
2923     if (CurrVec && L->contains(CurrVec))
2924       continue;
2925     if (NewElem && L->contains(NewElem))
2926       continue;
2927 
2928     // We can hoist this instruction. Move it to the pre-header.
2929     Insert->moveBefore(PreHeader->getTerminator());
2930   }
2931 
2932   // Make a list of all reachable blocks in our CSE queue.
2933   SmallVector<const DomTreeNode *, 8> CSEWorkList;
2934   CSEWorkList.reserve(CSEBlocks.size());
2935   for (BasicBlock *BB : CSEBlocks)
2936     if (DomTreeNode *N = DT->getNode(BB)) {
2937       assert(DT->isReachableFromEntry(N));
2938       CSEWorkList.push_back(N);
2939     }
2940 
2941   // Sort blocks by domination. This ensures we visit a block after all blocks
2942   // dominating it are visited.
2943   std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(),
2944                    [this](const DomTreeNode *A, const DomTreeNode *B) {
2945     return DT->properlyDominates(A, B);
2946   });
2947 
2948   // Perform O(N^2) search over the gather sequences and merge identical
2949   // instructions. TODO: We can further optimize this scan if we split the
2950   // instructions into different buckets based on the insert lane.
2951   SmallVector<Instruction *, 16> Visited;
2952   for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
2953     assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
2954            "Worklist not sorted properly!");
2955     BasicBlock *BB = (*I)->getBlock();
2956     // For all instructions in blocks containing gather sequences:
2957     for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
2958       Instruction *In = &*it++;
2959       if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
2960         continue;
2961 
2962       // Check if we can replace this instruction with any of the
2963       // visited instructions.
2964       for (Instruction *v : Visited) {
2965         if (In->isIdenticalTo(v) &&
2966             DT->dominates(v->getParent(), In->getParent())) {
2967           In->replaceAllUsesWith(v);
2968           eraseInstruction(In);
2969           In = nullptr;
2970           break;
2971         }
2972       }
2973       if (In) {
2974         assert(!is_contained(Visited, In));
2975         Visited.push_back(In);
2976       }
2977     }
2978   }
2979   CSEBlocks.clear();
2980   GatherSeq.clear();
2981 }
2982 
2983 // Groups the instructions to a bundle (which is then a single scheduling entity)
2984 // and schedules instructions until the bundle gets ready.
2985 bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL,
2986                                                  BoUpSLP *SLP) {
2987   if (isa<PHINode>(VL[0]))
2988     return true;
2989 
2990   // Initialize the instruction bundle.
2991   Instruction *OldScheduleEnd = ScheduleEnd;
2992   ScheduleData *PrevInBundle = nullptr;
2993   ScheduleData *Bundle = nullptr;
2994   bool ReSchedule = false;
2995   DEBUG(dbgs() << "SLP:  bundle: " << *VL[0] << "\n");
2996 
2997   // Make sure that the scheduling region contains all
2998   // instructions of the bundle.
2999   for (Value *V : VL) {
3000     if (!extendSchedulingRegion(V))
3001       return false;
3002   }
3003 
3004   for (Value *V : VL) {
3005     ScheduleData *BundleMember = getScheduleData(V);
3006     assert(BundleMember &&
3007            "no ScheduleData for bundle member (maybe not in same basic block)");
3008     if (BundleMember->IsScheduled) {
3009       // A bundle member was scheduled as single instruction before and now
3010       // needs to be scheduled as part of the bundle. We just get rid of the
3011       // existing schedule.
3012       DEBUG(dbgs() << "SLP:  reset schedule because " << *BundleMember
3013                    << " was already scheduled\n");
3014       ReSchedule = true;
3015     }
3016     assert(BundleMember->isSchedulingEntity() &&
3017            "bundle member already part of other bundle");
3018     if (PrevInBundle) {
3019       PrevInBundle->NextInBundle = BundleMember;
3020     } else {
3021       Bundle = BundleMember;
3022     }
3023     BundleMember->UnscheduledDepsInBundle = 0;
3024     Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;
3025 
3026     // Group the instructions to a bundle.
3027     BundleMember->FirstInBundle = Bundle;
3028     PrevInBundle = BundleMember;
3029   }
3030   if (ScheduleEnd != OldScheduleEnd) {
3031     // The scheduling region got new instructions at the lower end (or it is a
3032     // new region for the first bundle). This makes it necessary to
3033     // recalculate all dependencies.
3034     // It is seldom that this needs to be done a second time after adding the
3035     // initial bundle to the region.
3036     for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
3037       ScheduleData *SD = getScheduleData(I);
3038       SD->clearDependencies();
3039     }
3040     ReSchedule = true;
3041   }
3042   if (ReSchedule) {
3043     resetSchedule();
3044     initialFillReadyList(ReadyInsts);
3045   }
3046 
3047   DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
3048                << BB->getName() << "\n");
3049 
3050   calculateDependencies(Bundle, true, SLP);
3051 
3052   // Now try to schedule the new bundle. As soon as the bundle is "ready" it
3053   // means that there are no cyclic dependencies and we can schedule it.
3054   // Note that's important that we don't "schedule" the bundle yet (see
3055   // cancelScheduling).
3056   while (!Bundle->isReady() && !ReadyInsts.empty()) {
3057 
3058     ScheduleData *pickedSD = ReadyInsts.back();
3059     ReadyInsts.pop_back();
3060 
3061     if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) {
3062       schedule(pickedSD, ReadyInsts);
3063     }
3064   }
3065   if (!Bundle->isReady()) {
3066     cancelScheduling(VL);
3067     return false;
3068   }
3069   return true;
3070 }
3071 
3072 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL) {
3073   if (isa<PHINode>(VL[0]))
3074     return;
3075 
3076   ScheduleData *Bundle = getScheduleData(VL[0]);
3077   DEBUG(dbgs() << "SLP:  cancel scheduling of " << *Bundle << "\n");
3078   assert(!Bundle->IsScheduled &&
3079          "Can't cancel bundle which is already scheduled");
3080   assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
3081          "tried to unbundle something which is not a bundle");
3082 
3083   // Un-bundle: make single instructions out of the bundle.
3084   ScheduleData *BundleMember = Bundle;
3085   while (BundleMember) {
3086     assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
3087     BundleMember->FirstInBundle = BundleMember;
3088     ScheduleData *Next = BundleMember->NextInBundle;
3089     BundleMember->NextInBundle = nullptr;
3090     BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
3091     if (BundleMember->UnscheduledDepsInBundle == 0) {
3092       ReadyInsts.insert(BundleMember);
3093     }
3094     BundleMember = Next;
3095   }
3096 }
3097 
3098 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V) {
3099   if (getScheduleData(V))
3100     return true;
3101   Instruction *I = dyn_cast<Instruction>(V);
3102   assert(I && "bundle member must be an instruction");
3103   assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
3104   if (!ScheduleStart) {
3105     // It's the first instruction in the new region.
3106     initScheduleData(I, I->getNextNode(), nullptr, nullptr);
3107     ScheduleStart = I;
3108     ScheduleEnd = I->getNextNode();
3109     assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
3110     DEBUG(dbgs() << "SLP:  initialize schedule region to " << *I << "\n");
3111     return true;
3112   }
3113   // Search up and down at the same time, because we don't know if the new
3114   // instruction is above or below the existing scheduling region.
3115   BasicBlock::reverse_iterator UpIter =
3116       ++ScheduleStart->getIterator().getReverse();
3117   BasicBlock::reverse_iterator UpperEnd = BB->rend();
3118   BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
3119   BasicBlock::iterator LowerEnd = BB->end();
3120   for (;;) {
3121     if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
3122       DEBUG(dbgs() << "SLP:  exceeded schedule region size limit\n");
3123       return false;
3124     }
3125 
3126     if (UpIter != UpperEnd) {
3127       if (&*UpIter == I) {
3128         initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
3129         ScheduleStart = I;
3130         DEBUG(dbgs() << "SLP:  extend schedule region start to " << *I << "\n");
3131         return true;
3132       }
3133       UpIter++;
3134     }
3135     if (DownIter != LowerEnd) {
3136       if (&*DownIter == I) {
3137         initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
3138                          nullptr);
3139         ScheduleEnd = I->getNextNode();
3140         assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
3141         DEBUG(dbgs() << "SLP:  extend schedule region end to " << *I << "\n");
3142         return true;
3143       }
3144       DownIter++;
3145     }
3146     assert((UpIter != UpperEnd || DownIter != LowerEnd) &&
3147            "instruction not found in block");
3148   }
3149   return true;
3150 }
3151 
3152 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
3153                                                 Instruction *ToI,
3154                                                 ScheduleData *PrevLoadStore,
3155                                                 ScheduleData *NextLoadStore) {
3156   ScheduleData *CurrentLoadStore = PrevLoadStore;
3157   for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
3158     ScheduleData *SD = ScheduleDataMap[I];
3159     if (!SD) {
3160       // Allocate a new ScheduleData for the instruction.
3161       if (ChunkPos >= ChunkSize) {
3162         ScheduleDataChunks.push_back(
3163             llvm::make_unique<ScheduleData[]>(ChunkSize));
3164         ChunkPos = 0;
3165       }
3166       SD = &(ScheduleDataChunks.back()[ChunkPos++]);
3167       ScheduleDataMap[I] = SD;
3168       SD->Inst = I;
3169     }
3170     assert(!isInSchedulingRegion(SD) &&
3171            "new ScheduleData already in scheduling region");
3172     SD->init(SchedulingRegionID);
3173 
3174     if (I->mayReadOrWriteMemory()) {
3175       // Update the linked list of memory accessing instructions.
3176       if (CurrentLoadStore) {
3177         CurrentLoadStore->NextLoadStore = SD;
3178       } else {
3179         FirstLoadStoreInRegion = SD;
3180       }
3181       CurrentLoadStore = SD;
3182     }
3183   }
3184   if (NextLoadStore) {
3185     if (CurrentLoadStore)
3186       CurrentLoadStore->NextLoadStore = NextLoadStore;
3187   } else {
3188     LastLoadStoreInRegion = CurrentLoadStore;
3189   }
3190 }
3191 
3192 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
3193                                                      bool InsertInReadyList,
3194                                                      BoUpSLP *SLP) {
3195   assert(SD->isSchedulingEntity());
3196 
3197   SmallVector<ScheduleData *, 10> WorkList;
3198   WorkList.push_back(SD);
3199 
3200   while (!WorkList.empty()) {
3201     ScheduleData *SD = WorkList.back();
3202     WorkList.pop_back();
3203 
3204     ScheduleData *BundleMember = SD;
3205     while (BundleMember) {
3206       assert(isInSchedulingRegion(BundleMember));
3207       if (!BundleMember->hasValidDependencies()) {
3208 
3209         DEBUG(dbgs() << "SLP:       update deps of " << *BundleMember << "\n");
3210         BundleMember->Dependencies = 0;
3211         BundleMember->resetUnscheduledDeps();
3212 
3213         // Handle def-use chain dependencies.
3214         for (User *U : BundleMember->Inst->users()) {
3215           if (isa<Instruction>(U)) {
3216             ScheduleData *UseSD = getScheduleData(U);
3217             if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
3218               BundleMember->Dependencies++;
3219               ScheduleData *DestBundle = UseSD->FirstInBundle;
3220               if (!DestBundle->IsScheduled) {
3221                 BundleMember->incrementUnscheduledDeps(1);
3222               }
3223               if (!DestBundle->hasValidDependencies()) {
3224                 WorkList.push_back(DestBundle);
3225               }
3226             }
3227           } else {
3228             // I'm not sure if this can ever happen. But we need to be safe.
3229             // This lets the instruction/bundle never be scheduled and
3230             // eventually disable vectorization.
3231             BundleMember->Dependencies++;
3232             BundleMember->incrementUnscheduledDeps(1);
3233           }
3234         }
3235 
3236         // Handle the memory dependencies.
3237         ScheduleData *DepDest = BundleMember->NextLoadStore;
3238         if (DepDest) {
3239           Instruction *SrcInst = BundleMember->Inst;
3240           MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
3241           bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
3242           unsigned numAliased = 0;
3243           unsigned DistToSrc = 1;
3244 
3245           while (DepDest) {
3246             assert(isInSchedulingRegion(DepDest));
3247 
3248             // We have two limits to reduce the complexity:
3249             // 1) AliasedCheckLimit: It's a small limit to reduce calls to
3250             //    SLP->isAliased (which is the expensive part in this loop).
3251             // 2) MaxMemDepDistance: It's for very large blocks and it aborts
3252             //    the whole loop (even if the loop is fast, it's quadratic).
3253             //    It's important for the loop break condition (see below) to
3254             //    check this limit even between two read-only instructions.
3255             if (DistToSrc >= MaxMemDepDistance ||
3256                     ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
3257                      (numAliased >= AliasedCheckLimit ||
3258                       SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
3259 
3260               // We increment the counter only if the locations are aliased
3261               // (instead of counting all alias checks). This gives a better
3262               // balance between reduced runtime and accurate dependencies.
3263               numAliased++;
3264 
3265               DepDest->MemoryDependencies.push_back(BundleMember);
3266               BundleMember->Dependencies++;
3267               ScheduleData *DestBundle = DepDest->FirstInBundle;
3268               if (!DestBundle->IsScheduled) {
3269                 BundleMember->incrementUnscheduledDeps(1);
3270               }
3271               if (!DestBundle->hasValidDependencies()) {
3272                 WorkList.push_back(DestBundle);
3273               }
3274             }
3275             DepDest = DepDest->NextLoadStore;
3276 
3277             // Example, explaining the loop break condition: Let's assume our
3278             // starting instruction is i0 and MaxMemDepDistance = 3.
3279             //
3280             //                      +--------v--v--v
3281             //             i0,i1,i2,i3,i4,i5,i6,i7,i8
3282             //             +--------^--^--^
3283             //
3284             // MaxMemDepDistance let us stop alias-checking at i3 and we add
3285             // dependencies from i0 to i3,i4,.. (even if they are not aliased).
3286             // Previously we already added dependencies from i3 to i6,i7,i8
3287             // (because of MaxMemDepDistance). As we added a dependency from
3288             // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
3289             // and we can abort this loop at i6.
3290             if (DistToSrc >= 2 * MaxMemDepDistance)
3291                 break;
3292             DistToSrc++;
3293           }
3294         }
3295       }
3296       BundleMember = BundleMember->NextInBundle;
3297     }
3298     if (InsertInReadyList && SD->isReady()) {
3299       ReadyInsts.push_back(SD);
3300       DEBUG(dbgs() << "SLP:     gets ready on update: " << *SD->Inst << "\n");
3301     }
3302   }
3303 }
3304 
3305 void BoUpSLP::BlockScheduling::resetSchedule() {
3306   assert(ScheduleStart &&
3307          "tried to reset schedule on block which has not been scheduled");
3308   for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
3309     ScheduleData *SD = getScheduleData(I);
3310     assert(isInSchedulingRegion(SD));
3311     SD->IsScheduled = false;
3312     SD->resetUnscheduledDeps();
3313   }
3314   ReadyInsts.clear();
3315 }
3316 
3317 void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
3318 
3319   if (!BS->ScheduleStart)
3320     return;
3321 
3322   DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
3323 
3324   BS->resetSchedule();
3325 
3326   // For the real scheduling we use a more sophisticated ready-list: it is
3327   // sorted by the original instruction location. This lets the final schedule
3328   // be as  close as possible to the original instruction order.
3329   struct ScheduleDataCompare {
3330     bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
3331       return SD2->SchedulingPriority < SD1->SchedulingPriority;
3332     }
3333   };
3334   std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
3335 
3336   // Ensure that all dependency data is updated and fill the ready-list with
3337   // initial instructions.
3338   int Idx = 0;
3339   int NumToSchedule = 0;
3340   for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
3341        I = I->getNextNode()) {
3342     ScheduleData *SD = BS->getScheduleData(I);
3343     assert(
3344         SD->isPartOfBundle() == (ScalarToTreeEntry.count(SD->Inst) != 0) &&
3345         "scheduler and vectorizer have different opinion on what is a bundle");
3346     SD->FirstInBundle->SchedulingPriority = Idx++;
3347     if (SD->isSchedulingEntity()) {
3348       BS->calculateDependencies(SD, false, this);
3349       NumToSchedule++;
3350     }
3351   }
3352   BS->initialFillReadyList(ReadyInsts);
3353 
3354   Instruction *LastScheduledInst = BS->ScheduleEnd;
3355 
3356   // Do the "real" scheduling.
3357   while (!ReadyInsts.empty()) {
3358     ScheduleData *picked = *ReadyInsts.begin();
3359     ReadyInsts.erase(ReadyInsts.begin());
3360 
3361     // Move the scheduled instruction(s) to their dedicated places, if not
3362     // there yet.
3363     ScheduleData *BundleMember = picked;
3364     while (BundleMember) {
3365       Instruction *pickedInst = BundleMember->Inst;
3366       if (LastScheduledInst->getNextNode() != pickedInst) {
3367         BS->BB->getInstList().remove(pickedInst);
3368         BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
3369                                      pickedInst);
3370       }
3371       LastScheduledInst = pickedInst;
3372       BundleMember = BundleMember->NextInBundle;
3373     }
3374 
3375     BS->schedule(picked, ReadyInsts);
3376     NumToSchedule--;
3377   }
3378   assert(NumToSchedule == 0 && "could not schedule all instructions");
3379 
3380   // Avoid duplicate scheduling of the block.
3381   BS->ScheduleStart = nullptr;
3382 }
3383 
3384 unsigned BoUpSLP::getVectorElementSize(Value *V) {
3385   // If V is a store, just return the width of the stored value without
3386   // traversing the expression tree. This is the common case.
3387   if (auto *Store = dyn_cast<StoreInst>(V))
3388     return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
3389 
3390   // If V is not a store, we can traverse the expression tree to find loads
3391   // that feed it. The type of the loaded value may indicate a more suitable
3392   // width than V's type. We want to base the vector element size on the width
3393   // of memory operations where possible.
3394   SmallVector<Instruction *, 16> Worklist;
3395   SmallPtrSet<Instruction *, 16> Visited;
3396   if (auto *I = dyn_cast<Instruction>(V))
3397     Worklist.push_back(I);
3398 
3399   // Traverse the expression tree in bottom-up order looking for loads. If we
3400   // encounter an instruciton we don't yet handle, we give up.
3401   auto MaxWidth = 0u;
3402   auto FoundUnknownInst = false;
3403   while (!Worklist.empty() && !FoundUnknownInst) {
3404     auto *I = Worklist.pop_back_val();
3405     Visited.insert(I);
3406 
3407     // We should only be looking at scalar instructions here. If the current
3408     // instruction has a vector type, give up.
3409     auto *Ty = I->getType();
3410     if (isa<VectorType>(Ty))
3411       FoundUnknownInst = true;
3412 
3413     // If the current instruction is a load, update MaxWidth to reflect the
3414     // width of the loaded value.
3415     else if (isa<LoadInst>(I))
3416       MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty));
3417 
3418     // Otherwise, we need to visit the operands of the instruction. We only
3419     // handle the interesting cases from buildTree here. If an operand is an
3420     // instruction we haven't yet visited, we add it to the worklist.
3421     else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
3422              isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) {
3423       for (Use &U : I->operands())
3424         if (auto *J = dyn_cast<Instruction>(U.get()))
3425           if (!Visited.count(J))
3426             Worklist.push_back(J);
3427     }
3428 
3429     // If we don't yet handle the instruction, give up.
3430     else
3431       FoundUnknownInst = true;
3432   }
3433 
3434   // If we didn't encounter a memory access in the expression tree, or if we
3435   // gave up for some reason, just return the width of V.
3436   if (!MaxWidth || FoundUnknownInst)
3437     return DL->getTypeSizeInBits(V->getType());
3438 
3439   // Otherwise, return the maximum width we found.
3440   return MaxWidth;
3441 }
3442 
3443 // Determine if a value V in a vectorizable expression Expr can be demoted to a
3444 // smaller type with a truncation. We collect the values that will be demoted
3445 // in ToDemote and additional roots that require investigating in Roots.
3446 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
3447                                   SmallVectorImpl<Value *> &ToDemote,
3448                                   SmallVectorImpl<Value *> &Roots) {
3449 
3450   // We can always demote constants.
3451   if (isa<Constant>(V)) {
3452     ToDemote.push_back(V);
3453     return true;
3454   }
3455 
3456   // If the value is not an instruction in the expression with only one use, it
3457   // cannot be demoted.
3458   auto *I = dyn_cast<Instruction>(V);
3459   if (!I || !I->hasOneUse() || !Expr.count(I))
3460     return false;
3461 
3462   switch (I->getOpcode()) {
3463 
3464   // We can always demote truncations and extensions. Since truncations can
3465   // seed additional demotion, we save the truncated value.
3466   case Instruction::Trunc:
3467     Roots.push_back(I->getOperand(0));
3468   case Instruction::ZExt:
3469   case Instruction::SExt:
3470     break;
3471 
3472   // We can demote certain binary operations if we can demote both of their
3473   // operands.
3474   case Instruction::Add:
3475   case Instruction::Sub:
3476   case Instruction::Mul:
3477   case Instruction::And:
3478   case Instruction::Or:
3479   case Instruction::Xor:
3480     if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
3481         !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
3482       return false;
3483     break;
3484 
3485   // We can demote selects if we can demote their true and false values.
3486   case Instruction::Select: {
3487     SelectInst *SI = cast<SelectInst>(I);
3488     if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
3489         !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
3490       return false;
3491     break;
3492   }
3493 
3494   // We can demote phis if we can demote all their incoming operands. Note that
3495   // we don't need to worry about cycles since we ensure single use above.
3496   case Instruction::PHI: {
3497     PHINode *PN = cast<PHINode>(I);
3498     for (Value *IncValue : PN->incoming_values())
3499       if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
3500         return false;
3501     break;
3502   }
3503 
3504   // Otherwise, conservatively give up.
3505   default:
3506     return false;
3507   }
3508 
3509   // Record the value that we can demote.
3510   ToDemote.push_back(V);
3511   return true;
3512 }
3513 
3514 void BoUpSLP::computeMinimumValueSizes() {
3515   // If there are no external uses, the expression tree must be rooted by a
3516   // store. We can't demote in-memory values, so there is nothing to do here.
3517   if (ExternalUses.empty())
3518     return;
3519 
3520   // We only attempt to truncate integer expressions.
3521   auto &TreeRoot = VectorizableTree[0].Scalars;
3522   auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
3523   if (!TreeRootIT)
3524     return;
3525 
3526   // If the expression is not rooted by a store, these roots should have
3527   // external uses. We will rely on InstCombine to rewrite the expression in
3528   // the narrower type. However, InstCombine only rewrites single-use values.
3529   // This means that if a tree entry other than a root is used externally, it
3530   // must have multiple uses and InstCombine will not rewrite it. The code
3531   // below ensures that only the roots are used externally.
3532   SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
3533   for (auto &EU : ExternalUses)
3534     if (!Expr.erase(EU.Scalar))
3535       return;
3536   if (!Expr.empty())
3537     return;
3538 
3539   // Collect the scalar values of the vectorizable expression. We will use this
3540   // context to determine which values can be demoted. If we see a truncation,
3541   // we mark it as seeding another demotion.
3542   for (auto &Entry : VectorizableTree)
3543     Expr.insert(Entry.Scalars.begin(), Entry.Scalars.end());
3544 
3545   // Ensure the roots of the vectorizable tree don't form a cycle. They must
3546   // have a single external user that is not in the vectorizable tree.
3547   for (auto *Root : TreeRoot)
3548     if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
3549       return;
3550 
3551   // Conservatively determine if we can actually truncate the roots of the
3552   // expression. Collect the values that can be demoted in ToDemote and
3553   // additional roots that require investigating in Roots.
3554   SmallVector<Value *, 32> ToDemote;
3555   SmallVector<Value *, 4> Roots;
3556   for (auto *Root : TreeRoot)
3557     if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
3558       return;
3559 
3560   // The maximum bit width required to represent all the values that can be
3561   // demoted without loss of precision. It would be safe to truncate the roots
3562   // of the expression to this width.
3563   auto MaxBitWidth = 8u;
3564 
3565   // We first check if all the bits of the roots are demanded. If they're not,
3566   // we can truncate the roots to this narrower type.
3567   for (auto *Root : TreeRoot) {
3568     auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
3569     MaxBitWidth = std::max<unsigned>(
3570         Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
3571   }
3572 
3573   // True if the roots can be zero-extended back to their original type, rather
3574   // than sign-extended. We know that if the leading bits are not demanded, we
3575   // can safely zero-extend. So we initialize IsKnownPositive to True.
3576   bool IsKnownPositive = true;
3577 
3578   // If all the bits of the roots are demanded, we can try a little harder to
3579   // compute a narrower type. This can happen, for example, if the roots are
3580   // getelementptr indices. InstCombine promotes these indices to the pointer
3581   // width. Thus, all their bits are technically demanded even though the
3582   // address computation might be vectorized in a smaller type.
3583   //
3584   // We start by looking at each entry that can be demoted. We compute the
3585   // maximum bit width required to store the scalar by using ValueTracking to
3586   // compute the number of high-order bits we can truncate.
3587   if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType())) {
3588     MaxBitWidth = 8u;
3589 
3590     // Determine if the sign bit of all the roots is known to be zero. If not,
3591     // IsKnownPositive is set to False.
3592     IsKnownPositive = all_of(TreeRoot, [&](Value *R) {
3593       bool KnownZero = false;
3594       bool KnownOne = false;
3595       ComputeSignBit(R, KnownZero, KnownOne, *DL);
3596       return KnownZero;
3597     });
3598 
3599     // Determine the maximum number of bits required to store the scalar
3600     // values.
3601     for (auto *Scalar : ToDemote) {
3602       auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, 0, DT);
3603       auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
3604       MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
3605     }
3606 
3607     // If we can't prove that the sign bit is zero, we must add one to the
3608     // maximum bit width to account for the unknown sign bit. This preserves
3609     // the existing sign bit so we can safely sign-extend the root back to the
3610     // original type. Otherwise, if we know the sign bit is zero, we will
3611     // zero-extend the root instead.
3612     //
3613     // FIXME: This is somewhat suboptimal, as there will be cases where adding
3614     //        one to the maximum bit width will yield a larger-than-necessary
3615     //        type. In general, we need to add an extra bit only if we can't
3616     //        prove that the upper bit of the original type is equal to the
3617     //        upper bit of the proposed smaller type. If these two bits are the
3618     //        same (either zero or one) we know that sign-extending from the
3619     //        smaller type will result in the same value. Here, since we can't
3620     //        yet prove this, we are just making the proposed smaller type
3621     //        larger to ensure correctness.
3622     if (!IsKnownPositive)
3623       ++MaxBitWidth;
3624   }
3625 
3626   // Round MaxBitWidth up to the next power-of-two.
3627   if (!isPowerOf2_64(MaxBitWidth))
3628     MaxBitWidth = NextPowerOf2(MaxBitWidth);
3629 
3630   // If the maximum bit width we compute is less than the with of the roots'
3631   // type, we can proceed with the narrowing. Otherwise, do nothing.
3632   if (MaxBitWidth >= TreeRootIT->getBitWidth())
3633     return;
3634 
3635   // If we can truncate the root, we must collect additional values that might
3636   // be demoted as a result. That is, those seeded by truncations we will
3637   // modify.
3638   while (!Roots.empty())
3639     collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
3640 
3641   // Finally, map the values we can demote to the maximum bit with we computed.
3642   for (auto *Scalar : ToDemote)
3643     MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
3644 }
3645 
3646 namespace {
3647 /// The SLPVectorizer Pass.
3648 struct SLPVectorizer : public FunctionPass {
3649   SLPVectorizerPass Impl;
3650 
3651   /// Pass identification, replacement for typeid
3652   static char ID;
3653 
3654   explicit SLPVectorizer() : FunctionPass(ID) {
3655     initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
3656   }
3657 
3658 
3659   bool doInitialization(Module &M) override {
3660     return false;
3661   }
3662 
3663   bool runOnFunction(Function &F) override {
3664     if (skipFunction(F))
3665       return false;
3666 
3667     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
3668     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
3669     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
3670     auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
3671     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
3672     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
3673     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
3674     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
3675     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
3676 
3677     return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB);
3678   }
3679 
3680   void getAnalysisUsage(AnalysisUsage &AU) const override {
3681     FunctionPass::getAnalysisUsage(AU);
3682     AU.addRequired<AssumptionCacheTracker>();
3683     AU.addRequired<ScalarEvolutionWrapperPass>();
3684     AU.addRequired<AAResultsWrapperPass>();
3685     AU.addRequired<TargetTransformInfoWrapperPass>();
3686     AU.addRequired<LoopInfoWrapperPass>();
3687     AU.addRequired<DominatorTreeWrapperPass>();
3688     AU.addRequired<DemandedBitsWrapperPass>();
3689     AU.addPreserved<LoopInfoWrapperPass>();
3690     AU.addPreserved<DominatorTreeWrapperPass>();
3691     AU.addPreserved<AAResultsWrapperPass>();
3692     AU.addPreserved<GlobalsAAWrapperPass>();
3693     AU.setPreservesCFG();
3694   }
3695 };
3696 } // end anonymous namespace
3697 
3698 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
3699   auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
3700   auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
3701   auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
3702   auto *AA = &AM.getResult<AAManager>(F);
3703   auto *LI = &AM.getResult<LoopAnalysis>(F);
3704   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
3705   auto *AC = &AM.getResult<AssumptionAnalysis>(F);
3706   auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
3707 
3708   bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB);
3709   if (!Changed)
3710     return PreservedAnalyses::all();
3711 
3712   PreservedAnalyses PA;
3713   PA.preserveSet<CFGAnalyses>();
3714   PA.preserve<AAManager>();
3715   PA.preserve<GlobalsAA>();
3716   return PA;
3717 }
3718 
3719 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
3720                                 TargetTransformInfo *TTI_,
3721                                 TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
3722                                 LoopInfo *LI_, DominatorTree *DT_,
3723                                 AssumptionCache *AC_, DemandedBits *DB_) {
3724   SE = SE_;
3725   TTI = TTI_;
3726   TLI = TLI_;
3727   AA = AA_;
3728   LI = LI_;
3729   DT = DT_;
3730   AC = AC_;
3731   DB = DB_;
3732   DL = &F.getParent()->getDataLayout();
3733 
3734   Stores.clear();
3735   GEPs.clear();
3736   bool Changed = false;
3737 
3738   // If the target claims to have no vector registers don't attempt
3739   // vectorization.
3740   if (!TTI->getNumberOfRegisters(true))
3741     return false;
3742 
3743   // Don't vectorize when the attribute NoImplicitFloat is used.
3744   if (F.hasFnAttribute(Attribute::NoImplicitFloat))
3745     return false;
3746 
3747   DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
3748 
3749   // Use the bottom up slp vectorizer to construct chains that start with
3750   // store instructions.
3751   BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL);
3752 
3753   // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
3754   // delete instructions.
3755 
3756   // Scan the blocks in the function in post order.
3757   for (auto BB : post_order(&F.getEntryBlock())) {
3758     collectSeedInstructions(BB);
3759 
3760     // Vectorize trees that end at stores.
3761     if (!Stores.empty()) {
3762       DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
3763                    << " underlying objects.\n");
3764       Changed |= vectorizeStoreChains(R);
3765     }
3766 
3767     // Vectorize trees that end at reductions.
3768     Changed |= vectorizeChainsInBlock(BB, R);
3769 
3770     // Vectorize the index computations of getelementptr instructions. This
3771     // is primarily intended to catch gather-like idioms ending at
3772     // non-consecutive loads.
3773     if (!GEPs.empty()) {
3774       DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
3775                    << " underlying objects.\n");
3776       Changed |= vectorizeGEPIndices(BB, R);
3777     }
3778   }
3779 
3780   if (Changed) {
3781     R.optimizeGatherSequence();
3782     DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
3783     DEBUG(verifyFunction(F));
3784   }
3785   return Changed;
3786 }
3787 
3788 /// \brief Check that the Values in the slice in VL array are still existent in
3789 /// the WeakVH array.
3790 /// Vectorization of part of the VL array may cause later values in the VL array
3791 /// to become invalid. We track when this has happened in the WeakVH array.
3792 static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, ArrayRef<WeakVH> VH,
3793                                unsigned SliceBegin, unsigned SliceSize) {
3794   VL = VL.slice(SliceBegin, SliceSize);
3795   VH = VH.slice(SliceBegin, SliceSize);
3796   return !std::equal(VL.begin(), VL.end(), VH.begin());
3797 }
3798 
3799 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
3800                                             unsigned VecRegSize) {
3801   unsigned ChainLen = Chain.size();
3802   DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
3803         << "\n");
3804   unsigned Sz = R.getVectorElementSize(Chain[0]);
3805   unsigned VF = VecRegSize / Sz;
3806 
3807   if (!isPowerOf2_32(Sz) || VF < 2)
3808     return false;
3809 
3810   // Keep track of values that were deleted by vectorizing in the loop below.
3811   SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());
3812 
3813   bool Changed = false;
3814   // Look for profitable vectorizable trees at all offsets, starting at zero.
3815   for (unsigned i = 0, e = ChainLen; i < e; ++i) {
3816     if (i + VF > e)
3817       break;
3818 
3819     // Check that a previous iteration of this loop did not delete the Value.
3820     if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
3821       continue;
3822 
3823     DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
3824           << "\n");
3825     ArrayRef<Value *> Operands = Chain.slice(i, VF);
3826 
3827     R.buildTree(Operands);
3828     if (R.isTreeTinyAndNotFullyVectorizable())
3829       continue;
3830 
3831     R.computeMinimumValueSizes();
3832 
3833     int Cost = R.getTreeCost();
3834 
3835     DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
3836     if (Cost < -SLPCostThreshold) {
3837       DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
3838       R.vectorizeTree();
3839 
3840       // Move to the next bundle.
3841       i += VF - 1;
3842       Changed = true;
3843     }
3844   }
3845 
3846   return Changed;
3847 }
3848 
3849 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
3850                                         BoUpSLP &R) {
3851   SetVector<StoreInst *> Heads, Tails;
3852   SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
3853 
3854   // We may run into multiple chains that merge into a single chain. We mark the
3855   // stores that we vectorized so that we don't visit the same store twice.
3856   BoUpSLP::ValueSet VectorizedStores;
3857   bool Changed = false;
3858 
3859   // Do a quadratic search on all of the given stores and find
3860   // all of the pairs of stores that follow each other.
3861   SmallVector<unsigned, 16> IndexQueue;
3862   for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
3863     IndexQueue.clear();
3864     // If a store has multiple consecutive store candidates, search Stores
3865     // array according to the sequence: from i+1 to e, then from i-1 to 0.
3866     // This is because usually pairing with immediate succeeding or preceding
3867     // candidate create the best chance to find slp vectorization opportunity.
3868     unsigned j = 0;
3869     for (j = i + 1; j < e; ++j)
3870       IndexQueue.push_back(j);
3871     for (j = i; j > 0; --j)
3872       IndexQueue.push_back(j - 1);
3873 
3874     for (auto &k : IndexQueue) {
3875       if (isConsecutiveAccess(Stores[i], Stores[k], *DL, *SE)) {
3876         Tails.insert(Stores[k]);
3877         Heads.insert(Stores[i]);
3878         ConsecutiveChain[Stores[i]] = Stores[k];
3879         break;
3880       }
3881     }
3882   }
3883 
3884   // For stores that start but don't end a link in the chain:
3885   for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
3886        it != e; ++it) {
3887     if (Tails.count(*it))
3888       continue;
3889 
3890     // We found a store instr that starts a chain. Now follow the chain and try
3891     // to vectorize it.
3892     BoUpSLP::ValueList Operands;
3893     StoreInst *I = *it;
3894     // Collect the chain into a list.
3895     while (Tails.count(I) || Heads.count(I)) {
3896       if (VectorizedStores.count(I))
3897         break;
3898       Operands.push_back(I);
3899       // Move to the next value in the chain.
3900       I = ConsecutiveChain[I];
3901     }
3902 
3903     // FIXME: Is division-by-2 the correct step? Should we assert that the
3904     // register size is a power-of-2?
3905     for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize();
3906          Size /= 2) {
3907       if (vectorizeStoreChain(Operands, R, Size)) {
3908         // Mark the vectorized stores so that we don't vectorize them again.
3909         VectorizedStores.insert(Operands.begin(), Operands.end());
3910         Changed = true;
3911         break;
3912       }
3913     }
3914   }
3915 
3916   return Changed;
3917 }
3918 
3919 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
3920 
3921   // Initialize the collections. We will make a single pass over the block.
3922   Stores.clear();
3923   GEPs.clear();
3924 
3925   // Visit the store and getelementptr instructions in BB and organize them in
3926   // Stores and GEPs according to the underlying objects of their pointer
3927   // operands.
3928   for (Instruction &I : *BB) {
3929 
3930     // Ignore store instructions that are volatile or have a pointer operand
3931     // that doesn't point to a scalar type.
3932     if (auto *SI = dyn_cast<StoreInst>(&I)) {
3933       if (!SI->isSimple())
3934         continue;
3935       if (!isValidElementType(SI->getValueOperand()->getType()))
3936         continue;
3937       Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
3938     }
3939 
3940     // Ignore getelementptr instructions that have more than one index, a
3941     // constant index, or a pointer operand that doesn't point to a scalar
3942     // type.
3943     else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
3944       auto Idx = GEP->idx_begin()->get();
3945       if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
3946         continue;
3947       if (!isValidElementType(Idx->getType()))
3948         continue;
3949       if (GEP->getType()->isVectorTy())
3950         continue;
3951       GEPs[GetUnderlyingObject(GEP->getPointerOperand(), *DL)].push_back(GEP);
3952     }
3953   }
3954 }
3955 
3956 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
3957   if (!A || !B)
3958     return false;
3959   Value *VL[] = { A, B };
3960   return tryToVectorizeList(VL, R, None, true);
3961 }
3962 
3963 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
3964                                            ArrayRef<Value *> BuildVector,
3965                                            bool AllowReorder) {
3966   if (VL.size() < 2)
3967     return false;
3968 
3969   DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " << VL.size()
3970                << ".\n");
3971 
3972   // Check that all of the parts are scalar instructions of the same type.
3973   Instruction *I0 = dyn_cast<Instruction>(VL[0]);
3974   if (!I0)
3975     return false;
3976 
3977   unsigned Opcode0 = I0->getOpcode();
3978 
3979   unsigned Sz = R.getVectorElementSize(I0);
3980   unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz);
3981   unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
3982   if (MaxVF < 2)
3983     return false;
3984 
3985   for (Value *V : VL) {
3986     Type *Ty = V->getType();
3987     if (!isValidElementType(Ty))
3988       return false;
3989     Instruction *Inst = dyn_cast<Instruction>(V);
3990     if (!Inst || Inst->getOpcode() != Opcode0)
3991       return false;
3992   }
3993 
3994   bool Changed = false;
3995 
3996   // Keep track of values that were deleted by vectorizing in the loop below.
3997   SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());
3998 
3999   unsigned NextInst = 0, MaxInst = VL.size();
4000   for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF;
4001        VF /= 2) {
4002     // No actual vectorization should happen, if number of parts is the same as
4003     // provided vectorization factor (i.e. the scalar type is used for vector
4004     // code during codegen).
4005     auto *VecTy = VectorType::get(VL[0]->getType(), VF);
4006     if (TTI->getNumberOfParts(VecTy) == VF)
4007       continue;
4008     for (unsigned I = NextInst; I < MaxInst; ++I) {
4009       unsigned OpsWidth = 0;
4010 
4011       if (I + VF > MaxInst)
4012         OpsWidth = MaxInst - I;
4013       else
4014         OpsWidth = VF;
4015 
4016       if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
4017         break;
4018 
4019       // Check that a previous iteration of this loop did not delete the Value.
4020       if (hasValueBeenRAUWed(VL, TrackValues, I, OpsWidth))
4021         continue;
4022 
4023       DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
4024                    << "\n");
4025       ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
4026 
4027       ArrayRef<Value *> BuildVectorSlice;
4028       if (!BuildVector.empty())
4029         BuildVectorSlice = BuildVector.slice(I, OpsWidth);
4030 
4031       R.buildTree(Ops, BuildVectorSlice);
4032       // TODO: check if we can allow reordering for more cases.
4033       if (AllowReorder && R.shouldReorder()) {
4034         // Conceptually, there is nothing actually preventing us from trying to
4035         // reorder a larger list. In fact, we do exactly this when vectorizing
4036         // reductions. However, at this point, we only expect to get here from
4037         // tryToVectorizePair().
4038         assert(Ops.size() == 2);
4039         assert(BuildVectorSlice.empty());
4040         Value *ReorderedOps[] = {Ops[1], Ops[0]};
4041         R.buildTree(ReorderedOps, None);
4042       }
4043       if (R.isTreeTinyAndNotFullyVectorizable())
4044         continue;
4045 
4046       R.computeMinimumValueSizes();
4047       int Cost = R.getTreeCost();
4048 
4049       if (Cost < -SLPCostThreshold) {
4050         DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
4051         Value *VectorizedRoot = R.vectorizeTree();
4052 
4053         // Reconstruct the build vector by extracting the vectorized root. This
4054         // way we handle the case where some elements of the vector are
4055         // undefined.
4056         //  (return (inserelt <4 xi32> (insertelt undef (opd0) 0) (opd1) 2))
4057         if (!BuildVectorSlice.empty()) {
4058           // The insert point is the last build vector instruction. The
4059           // vectorized root will precede it. This guarantees that we get an
4060           // instruction. The vectorized tree could have been constant folded.
4061           Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back());
4062           unsigned VecIdx = 0;
4063           for (auto &V : BuildVectorSlice) {
4064             IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
4065                                         ++BasicBlock::iterator(InsertAfter));
4066             Instruction *I = cast<Instruction>(V);
4067             assert(isa<InsertElementInst>(I) || isa<InsertValueInst>(I));
4068             Instruction *Extract =
4069                 cast<Instruction>(Builder.CreateExtractElement(
4070                     VectorizedRoot, Builder.getInt32(VecIdx++)));
4071             I->setOperand(1, Extract);
4072             I->removeFromParent();
4073             I->insertAfter(Extract);
4074             InsertAfter = I;
4075           }
4076         }
4077         // Move to the next bundle.
4078         I += VF - 1;
4079         NextInst = I + 1;
4080         Changed = true;
4081       }
4082     }
4083   }
4084 
4085   return Changed;
4086 }
4087 
4088 bool SLPVectorizerPass::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
4089   if (!V)
4090     return false;
4091 
4092   Value *P = V->getParent();
4093 
4094   // Vectorize in current basic block only.
4095   auto *Op0 = dyn_cast<Instruction>(V->getOperand(0));
4096   auto *Op1 = dyn_cast<Instruction>(V->getOperand(1));
4097   if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
4098     return false;
4099 
4100   // Try to vectorize V.
4101   if (tryToVectorizePair(Op0, Op1, R))
4102     return true;
4103 
4104   auto *A = dyn_cast<BinaryOperator>(Op0);
4105   auto *B = dyn_cast<BinaryOperator>(Op1);
4106   // Try to skip B.
4107   if (B && B->hasOneUse()) {
4108     auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
4109     auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
4110     if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R))
4111       return true;
4112     if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R))
4113       return true;
4114   }
4115 
4116   // Try to skip A.
4117   if (A && A->hasOneUse()) {
4118     auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
4119     auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
4120     if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R))
4121       return true;
4122     if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R))
4123       return true;
4124   }
4125   return false;
4126 }
4127 
4128 /// \brief Generate a shuffle mask to be used in a reduction tree.
4129 ///
4130 /// \param VecLen The length of the vector to be reduced.
4131 /// \param NumEltsToRdx The number of elements that should be reduced in the
4132 ///        vector.
4133 /// \param IsPairwise Whether the reduction is a pairwise or splitting
4134 ///        reduction. A pairwise reduction will generate a mask of
4135 ///        <0,2,...> or <1,3,..> while a splitting reduction will generate
4136 ///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
4137 /// \param IsLeft True will generate a mask of even elements, odd otherwise.
4138 static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
4139                                    bool IsPairwise, bool IsLeft,
4140                                    IRBuilder<> &Builder) {
4141   assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");
4142 
4143   SmallVector<Constant *, 32> ShuffleMask(
4144       VecLen, UndefValue::get(Builder.getInt32Ty()));
4145 
4146   if (IsPairwise)
4147     // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
4148     for (unsigned i = 0; i != NumEltsToRdx; ++i)
4149       ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
4150   else
4151     // Move the upper half of the vector to the lower half.
4152     for (unsigned i = 0; i != NumEltsToRdx; ++i)
4153       ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);
4154 
4155   return ConstantVector::get(ShuffleMask);
4156 }
4157 
4158 namespace {
4159 /// Model horizontal reductions.
4160 ///
4161 /// A horizontal reduction is a tree of reduction operations (currently add and
4162 /// fadd) that has operations that can be put into a vector as its leaf.
4163 /// For example, this tree:
4164 ///
4165 /// mul mul mul mul
4166 ///  \  /    \  /
4167 ///   +       +
4168 ///    \     /
4169 ///       +
4170 /// This tree has "mul" as its reduced values and "+" as its reduction
4171 /// operations. A reduction might be feeding into a store or a binary operation
4172 /// feeding a phi.
4173 ///    ...
4174 ///    \  /
4175 ///     +
4176 ///     |
4177 ///  phi +=
4178 ///
4179 ///  Or:
4180 ///    ...
4181 ///    \  /
4182 ///     +
4183 ///     |
4184 ///   *p =
4185 ///
4186 class HorizontalReduction {
4187   SmallVector<Value *, 16> ReductionOps;
4188   SmallVector<Value *, 32> ReducedVals;
4189 
4190   BinaryOperator *ReductionRoot = nullptr;
4191   // After successfull horizontal reduction vectorization attempt for PHI node
4192   // vectorizer tries to update root binary op by combining vectorized tree and
4193   // the ReductionPHI node. But during vectorization this ReductionPHI can be
4194   // vectorized itself and replaced by the undef value, while the instruction
4195   // itself is marked for deletion. This 'marked for deletion' PHI node then can
4196   // be used in new binary operation, causing "Use still stuck around after Def
4197   // is destroyed" crash upon PHI node deletion.
4198   WeakVH ReductionPHI;
4199 
4200   /// The opcode of the reduction.
4201   Instruction::BinaryOps ReductionOpcode = Instruction::BinaryOpsEnd;
4202   /// The opcode of the values we perform a reduction on.
4203   unsigned ReducedValueOpcode = 0;
4204   /// Should we model this reduction as a pairwise reduction tree or a tree that
4205   /// splits the vector in halves and adds those halves.
4206   bool IsPairwiseReduction = false;
4207 
4208 public:
4209   HorizontalReduction() = default;
4210 
4211   /// \brief Try to find a reduction tree.
4212   bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B) {
4213     assert((!Phi || is_contained(Phi->operands(), B)) &&
4214            "Thi phi needs to use the binary operator");
4215 
4216     // We could have a initial reductions that is not an add.
4217     //  r *= v1 + v2 + v3 + v4
4218     // In such a case start looking for a tree rooted in the first '+'.
4219     if (Phi) {
4220       if (B->getOperand(0) == Phi) {
4221         Phi = nullptr;
4222         B = dyn_cast<BinaryOperator>(B->getOperand(1));
4223       } else if (B->getOperand(1) == Phi) {
4224         Phi = nullptr;
4225         B = dyn_cast<BinaryOperator>(B->getOperand(0));
4226       }
4227     }
4228 
4229     if (!B)
4230       return false;
4231 
4232     Type *Ty = B->getType();
4233     if (!isValidElementType(Ty))
4234       return false;
4235 
4236     ReductionOpcode = B->getOpcode();
4237     ReducedValueOpcode = 0;
4238     ReductionRoot = B;
4239     ReductionPHI = Phi;
4240 
4241     // We currently only support adds.
4242     if ((ReductionOpcode != Instruction::Add &&
4243          ReductionOpcode != Instruction::FAdd) ||
4244         !B->isAssociative())
4245       return false;
4246 
4247     // Post order traverse the reduction tree starting at B. We only handle true
4248     // trees containing only binary operators or selects.
4249     SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
4250     Stack.push_back(std::make_pair(B, 0));
4251     while (!Stack.empty()) {
4252       Instruction *TreeN = Stack.back().first;
4253       unsigned EdgeToVist = Stack.back().second++;
4254       bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;
4255 
4256       // Postorder vist.
4257       if (EdgeToVist == 2 || IsReducedValue) {
4258         if (IsReducedValue)
4259           ReducedVals.push_back(TreeN);
4260         else
4261           ReductionOps.push_back(TreeN);
4262         // Retract.
4263         Stack.pop_back();
4264         continue;
4265       }
4266 
4267       // Visit left or right.
4268       Value *NextV = TreeN->getOperand(EdgeToVist);
4269       if (NextV != Phi) {
4270         auto *I = dyn_cast<Instruction>(NextV);
4271         // Continue analysis if the next operand is a reduction operation or
4272         // (possibly) a reduced value. If the reduced value opcode is not set,
4273         // the first met operation != reduction operation is considered as the
4274         // reduced value class.
4275         if (I && (!ReducedValueOpcode || I->getOpcode() == ReducedValueOpcode ||
4276                   I->getOpcode() == ReductionOpcode)) {
4277           // Only handle trees in the current basic block.
4278           if (I->getParent() != B->getParent())
4279             return false;
4280 
4281           // Each tree node needs to have one user except for the ultimate
4282           // reduction.
4283           if (!I->hasOneUse() && I != B)
4284             return false;
4285 
4286           if (I->getOpcode() == ReductionOpcode) {
4287             // We need to be able to reassociate the reduction operations.
4288             if (!I->isAssociative())
4289               return false;
4290           } else if (ReducedValueOpcode &&
4291                      ReducedValueOpcode != I->getOpcode()) {
4292             // Make sure that the opcodes of the operations that we are going to
4293             // reduce match.
4294             return false;
4295           } else if (!ReducedValueOpcode)
4296             ReducedValueOpcode = I->getOpcode();
4297 
4298           Stack.push_back(std::make_pair(I, 0));
4299           continue;
4300         }
4301         return false;
4302       }
4303     }
4304     return true;
4305   }
4306 
4307   /// \brief Attempt to vectorize the tree found by
4308   /// matchAssociativeReduction.
4309   bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
4310     if (ReducedVals.empty())
4311       return false;
4312 
4313     // If there is a sufficient number of reduction values, reduce
4314     // to a nearby power-of-2. Can safely generate oversized
4315     // vectors and rely on the backend to split them to legal sizes.
4316     unsigned NumReducedVals = ReducedVals.size();
4317     if (NumReducedVals < 4)
4318       return false;
4319 
4320     unsigned ReduxWidth = PowerOf2Floor(NumReducedVals);
4321 
4322     Value *VectorizedTree = nullptr;
4323     IRBuilder<> Builder(ReductionRoot);
4324     FastMathFlags Unsafe;
4325     Unsafe.setUnsafeAlgebra();
4326     Builder.setFastMathFlags(Unsafe);
4327     unsigned i = 0;
4328 
4329     while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) {
4330       auto VL = makeArrayRef(&ReducedVals[i], ReduxWidth);
4331       V.buildTree(VL, ReductionOps);
4332       if (V.shouldReorder()) {
4333         SmallVector<Value *, 8> Reversed(VL.rbegin(), VL.rend());
4334         V.buildTree(Reversed, ReductionOps);
4335       }
4336       if (V.isTreeTinyAndNotFullyVectorizable())
4337         break;
4338 
4339       V.computeMinimumValueSizes();
4340 
4341       // Estimate cost.
4342       int Cost =
4343           V.getTreeCost() + getReductionCost(TTI, ReducedVals[i], ReduxWidth);
4344       if (Cost >= -SLPCostThreshold)
4345         break;
4346 
4347       DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
4348                    << ". (HorRdx)\n");
4349 
4350       // Vectorize a tree.
4351       DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
4352       Value *VectorizedRoot = V.vectorizeTree();
4353 
4354       // Emit a reduction.
4355       Value *ReducedSubTree =
4356           emitReduction(VectorizedRoot, Builder, ReduxWidth);
4357       if (VectorizedTree) {
4358         Builder.SetCurrentDebugLocation(Loc);
4359         VectorizedTree = Builder.CreateBinOp(ReductionOpcode, VectorizedTree,
4360                                              ReducedSubTree, "bin.rdx");
4361       } else
4362         VectorizedTree = ReducedSubTree;
4363       i += ReduxWidth;
4364       ReduxWidth = PowerOf2Floor(NumReducedVals - i);
4365     }
4366 
4367     if (VectorizedTree) {
4368       // Finish the reduction.
4369       for (; i < NumReducedVals; ++i) {
4370         Builder.SetCurrentDebugLocation(
4371           cast<Instruction>(ReducedVals[i])->getDebugLoc());
4372         VectorizedTree = Builder.CreateBinOp(ReductionOpcode, VectorizedTree,
4373                                              ReducedVals[i]);
4374       }
4375       // Update users.
4376       if (ReductionPHI && !isa<UndefValue>(ReductionPHI)) {
4377         assert(ReductionRoot && "Need a reduction operation");
4378         ReductionRoot->setOperand(0, VectorizedTree);
4379         ReductionRoot->setOperand(1, ReductionPHI);
4380       } else
4381         ReductionRoot->replaceAllUsesWith(VectorizedTree);
4382     }
4383     return VectorizedTree != nullptr;
4384   }
4385 
4386   unsigned numReductionValues() const {
4387     return ReducedVals.size();
4388   }
4389 
4390 private:
4391   /// \brief Calculate the cost of a reduction.
4392   int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal,
4393                        unsigned ReduxWidth) {
4394     Type *ScalarTy = FirstReducedVal->getType();
4395     Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);
4396 
4397     int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true);
4398     int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false);
4399 
4400     IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
4401     int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;
4402 
4403     int ScalarReduxCost =
4404         (ReduxWidth - 1) *
4405         TTI->getArithmeticInstrCost(ReductionOpcode, ScalarTy);
4406 
4407     DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
4408                  << " for reduction that starts with " << *FirstReducedVal
4409                  << " (It is a "
4410                  << (IsPairwiseReduction ? "pairwise" : "splitting")
4411                  << " reduction)\n");
4412 
4413     return VecReduxCost - ScalarReduxCost;
4414   }
4415 
4416   /// \brief Emit a horizontal reduction of the vectorized value.
4417   Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder,
4418                        unsigned ReduxWidth) {
4419     assert(VectorizedValue && "Need to have a vectorized tree node");
4420     assert(isPowerOf2_32(ReduxWidth) &&
4421            "We only handle power-of-two reductions for now");
4422 
4423     Value *TmpVec = VectorizedValue;
4424     for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
4425       if (IsPairwiseReduction) {
4426         Value *LeftMask =
4427           createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
4428         Value *RightMask =
4429           createRdxShuffleMask(ReduxWidth, i, true, false, Builder);
4430 
4431         Value *LeftShuf = Builder.CreateShuffleVector(
4432           TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l");
4433         Value *RightShuf = Builder.CreateShuffleVector(
4434           TmpVec, UndefValue::get(TmpVec->getType()), (RightMask),
4435           "rdx.shuf.r");
4436         TmpVec = Builder.CreateBinOp(ReductionOpcode, LeftShuf, RightShuf,
4437                                      "bin.rdx");
4438       } else {
4439         Value *UpperHalf =
4440           createRdxShuffleMask(ReduxWidth, i, false, false, Builder);
4441         Value *Shuf = Builder.CreateShuffleVector(
4442           TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf");
4443         TmpVec = Builder.CreateBinOp(ReductionOpcode, TmpVec, Shuf, "bin.rdx");
4444       }
4445     }
4446 
4447     // The result is in the first element of the vector.
4448     return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
4449   }
4450 };
4451 } // end anonymous namespace
4452 
4453 /// \brief Recognize construction of vectors like
4454 ///  %ra = insertelement <4 x float> undef, float %s0, i32 0
4455 ///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
4456 ///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
4457 ///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
4458 ///
4459 /// Returns true if it matches
4460 ///
4461 static bool findBuildVector(InsertElementInst *FirstInsertElem,
4462                             SmallVectorImpl<Value *> &BuildVector,
4463                             SmallVectorImpl<Value *> &BuildVectorOpds) {
4464   if (!isa<UndefValue>(FirstInsertElem->getOperand(0)))
4465     return false;
4466 
4467   InsertElementInst *IE = FirstInsertElem;
4468   while (true) {
4469     BuildVector.push_back(IE);
4470     BuildVectorOpds.push_back(IE->getOperand(1));
4471 
4472     if (IE->use_empty())
4473       return false;
4474 
4475     InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back());
4476     if (!NextUse)
4477       return true;
4478 
4479     // If this isn't the final use, make sure the next insertelement is the only
4480     // use. It's OK if the final constructed vector is used multiple times
4481     if (!IE->hasOneUse())
4482       return false;
4483 
4484     IE = NextUse;
4485   }
4486 
4487   return false;
4488 }
4489 
4490 /// \brief Like findBuildVector, but looks backwards for construction of aggregate.
4491 ///
4492 /// \return true if it matches.
4493 static bool findBuildAggregate(InsertValueInst *IV,
4494                                SmallVectorImpl<Value *> &BuildVector,
4495                                SmallVectorImpl<Value *> &BuildVectorOpds) {
4496   if (!IV->hasOneUse())
4497     return false;
4498   Value *V = IV->getAggregateOperand();
4499   if (!isa<UndefValue>(V)) {
4500     InsertValueInst *I = dyn_cast<InsertValueInst>(V);
4501     if (!I || !findBuildAggregate(I, BuildVector, BuildVectorOpds))
4502       return false;
4503   }
4504   BuildVector.push_back(IV);
4505   BuildVectorOpds.push_back(IV->getInsertedValueOperand());
4506   return true;
4507 }
4508 
4509 static bool PhiTypeSorterFunc(Value *V, Value *V2) {
4510   return V->getType() < V2->getType();
4511 }
4512 
4513 /// \brief Try and get a reduction value from a phi node.
4514 ///
4515 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions
4516 /// if they come from either \p ParentBB or a containing loop latch.
4517 ///
4518 /// \returns A candidate reduction value if possible, or \code nullptr \endcode
4519 /// if not possible.
4520 static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
4521                                 BasicBlock *ParentBB, LoopInfo *LI) {
4522   // There are situations where the reduction value is not dominated by the
4523   // reduction phi. Vectorizing such cases has been reported to cause
4524   // miscompiles. See PR25787.
4525   auto DominatedReduxValue = [&](Value *R) {
4526     return (
4527         dyn_cast<Instruction>(R) &&
4528         DT->dominates(P->getParent(), dyn_cast<Instruction>(R)->getParent()));
4529   };
4530 
4531   Value *Rdx = nullptr;
4532 
4533   // Return the incoming value if it comes from the same BB as the phi node.
4534   if (P->getIncomingBlock(0) == ParentBB) {
4535     Rdx = P->getIncomingValue(0);
4536   } else if (P->getIncomingBlock(1) == ParentBB) {
4537     Rdx = P->getIncomingValue(1);
4538   }
4539 
4540   if (Rdx && DominatedReduxValue(Rdx))
4541     return Rdx;
4542 
4543   // Otherwise, check whether we have a loop latch to look at.
4544   Loop *BBL = LI->getLoopFor(ParentBB);
4545   if (!BBL)
4546     return nullptr;
4547   BasicBlock *BBLatch = BBL->getLoopLatch();
4548   if (!BBLatch)
4549     return nullptr;
4550 
4551   // There is a loop latch, return the incoming value if it comes from
4552   // that. This reduction pattern occasionally turns up.
4553   if (P->getIncomingBlock(0) == BBLatch) {
4554     Rdx = P->getIncomingValue(0);
4555   } else if (P->getIncomingBlock(1) == BBLatch) {
4556     Rdx = P->getIncomingValue(1);
4557   }
4558 
4559   if (Rdx && DominatedReduxValue(Rdx))
4560     return Rdx;
4561 
4562   return nullptr;
4563 }
4564 
4565 namespace {
4566 /// Tracks instructons and its children.
4567 class WeakVHWithLevel final : public CallbackVH {
4568   /// Operand index of the instruction currently beeing analized.
4569   unsigned Level = 0;
4570   /// Is this the instruction that should be vectorized, or are we now
4571   /// processing children (i.e. operands of this instruction) for potential
4572   /// vectorization?
4573   bool IsInitial = true;
4574 
4575 public:
4576   explicit WeakVHWithLevel() = default;
4577   WeakVHWithLevel(Value *V) : CallbackVH(V){};
4578   /// Restart children analysis each time it is repaced by the new instruction.
4579   void allUsesReplacedWith(Value *New) override {
4580     setValPtr(New);
4581     Level = 0;
4582     IsInitial = true;
4583   }
4584   /// Check if the instruction was not deleted during vectorization.
4585   bool isValid() const { return !getValPtr(); }
4586   /// Is the istruction itself must be vectorized?
4587   bool isInitial() const { return IsInitial; }
4588   /// Try to vectorize children.
4589   void clearInitial() { IsInitial = false; }
4590   /// Are all children processed already?
4591   bool isFinal() const {
4592     assert(getValPtr() &&
4593            (isa<Instruction>(getValPtr()) &&
4594             cast<Instruction>(getValPtr())->getNumOperands() >= Level));
4595     return getValPtr() &&
4596            cast<Instruction>(getValPtr())->getNumOperands() == Level;
4597   }
4598   /// Get next child operation.
4599   Value *nextOperand() {
4600     assert(getValPtr() && isa<Instruction>(getValPtr()) &&
4601            cast<Instruction>(getValPtr())->getNumOperands() > Level);
4602     return cast<Instruction>(getValPtr())->getOperand(Level++);
4603   }
4604   virtual ~WeakVHWithLevel() = default;
4605 };
4606 } // namespace
4607 
4608 /// \brief Attempt to reduce a horizontal reduction.
4609 /// If it is legal to match a horizontal reduction feeding
4610 /// the phi node P with reduction operators Root in a basic block BB, then check
4611 /// if it can be done.
4612 /// \returns true if a horizontal reduction was matched and reduced.
4613 /// \returns false if a horizontal reduction was not matched.
4614 static bool canBeVectorized(
4615     PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
4616     TargetTransformInfo *TTI,
4617     const function_ref<bool(BinaryOperator *, BoUpSLP &)> Vectorize) {
4618   if (!ShouldVectorizeHor)
4619     return false;
4620 
4621   if (!Root)
4622     return false;
4623 
4624   if (Root->getParent() != BB)
4625     return false;
4626   SmallVector<WeakVHWithLevel, 8> Stack(1, Root);
4627   SmallSet<Value *, 8> VisitedInstrs;
4628   bool Res = false;
4629   while (!Stack.empty()) {
4630     Value *V = Stack.back();
4631     if (!V) {
4632       Stack.pop_back();
4633       continue;
4634     }
4635     auto *Inst = dyn_cast<Instruction>(V);
4636     if (!Inst || isa<PHINode>(Inst)) {
4637       Stack.pop_back();
4638       continue;
4639     }
4640     if (Stack.back().isInitial()) {
4641       Stack.back().clearInitial();
4642       if (auto *BI = dyn_cast<BinaryOperator>(Inst)) {
4643         HorizontalReduction HorRdx;
4644         if (HorRdx.matchAssociativeReduction(P, BI)) {
4645           if (HorRdx.tryToReduce(R, TTI)) {
4646             Res = true;
4647             P = nullptr;
4648             continue;
4649           }
4650         }
4651         if (P) {
4652           Inst = dyn_cast<Instruction>(BI->getOperand(0));
4653           if (Inst == P)
4654             Inst = dyn_cast<Instruction>(BI->getOperand(1));
4655           if (!Inst) {
4656             P = nullptr;
4657             continue;
4658           }
4659         }
4660       }
4661       P = nullptr;
4662       if (Vectorize(dyn_cast<BinaryOperator>(Inst), R)) {
4663         Res = true;
4664         continue;
4665       }
4666     }
4667     if (Stack.back().isFinal()) {
4668       Stack.pop_back();
4669       continue;
4670     }
4671 
4672     if (auto *NextV = dyn_cast<Instruction>(Stack.back().nextOperand()))
4673       if (NextV->getParent() == BB && VisitedInstrs.insert(NextV).second &&
4674           Stack.size() < RecursionMaxDepth)
4675         Stack.push_back(NextV);
4676   }
4677   return Res;
4678 }
4679 
4680 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
4681                                                  BasicBlock *BB, BoUpSLP &R,
4682                                                  TargetTransformInfo *TTI) {
4683   if (!V)
4684     return false;
4685   auto *I = dyn_cast<Instruction>(V);
4686   if (!I)
4687     return false;
4688 
4689   if (!isa<BinaryOperator>(I))
4690     P = nullptr;
4691   // Try to match and vectorize a horizontal reduction.
4692   return canBeVectorized(P, I, BB, R, TTI,
4693                          [this](BinaryOperator *BI, BoUpSLP &R) -> bool {
4694                            return tryToVectorize(BI, R);
4695                          });
4696 }
4697 
4698 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
4699   bool Changed = false;
4700   SmallVector<Value *, 4> Incoming;
4701   SmallSet<Value *, 16> VisitedInstrs;
4702 
4703   bool HaveVectorizedPhiNodes = true;
4704   while (HaveVectorizedPhiNodes) {
4705     HaveVectorizedPhiNodes = false;
4706 
4707     // Collect the incoming values from the PHIs.
4708     Incoming.clear();
4709     for (Instruction &I : *BB) {
4710       PHINode *P = dyn_cast<PHINode>(&I);
4711       if (!P)
4712         break;
4713 
4714       if (!VisitedInstrs.count(P))
4715         Incoming.push_back(P);
4716     }
4717 
4718     // Sort by type.
4719     std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);
4720 
4721     // Try to vectorize elements base on their type.
4722     for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
4723                                            E = Incoming.end();
4724          IncIt != E;) {
4725 
4726       // Look for the next elements with the same type.
4727       SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
4728       while (SameTypeIt != E &&
4729              (*SameTypeIt)->getType() == (*IncIt)->getType()) {
4730         VisitedInstrs.insert(*SameTypeIt);
4731         ++SameTypeIt;
4732       }
4733 
4734       // Try to vectorize them.
4735       unsigned NumElts = (SameTypeIt - IncIt);
4736       DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts << ")\n");
4737       if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R)) {
4738         // Success start over because instructions might have been changed.
4739         HaveVectorizedPhiNodes = true;
4740         Changed = true;
4741         break;
4742       }
4743 
4744       // Start over at the next instruction of a different type (or the end).
4745       IncIt = SameTypeIt;
4746     }
4747   }
4748 
4749   VisitedInstrs.clear();
4750 
4751   for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
4752     // We may go through BB multiple times so skip the one we have checked.
4753     if (!VisitedInstrs.insert(&*it).second)
4754       continue;
4755 
4756     if (isa<DbgInfoIntrinsic>(it))
4757       continue;
4758 
4759     // Try to vectorize reductions that use PHINodes.
4760     if (PHINode *P = dyn_cast<PHINode>(it)) {
4761       // Check that the PHI is a reduction PHI.
4762       if (P->getNumIncomingValues() != 2)
4763         return Changed;
4764 
4765       // Try to match and vectorize a horizontal reduction.
4766       if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
4767                                    TTI)) {
4768         Changed = true;
4769         it = BB->begin();
4770         e = BB->end();
4771         continue;
4772       }
4773       continue;
4774     }
4775 
4776     if (ShouldStartVectorizeHorAtStore) {
4777       if (StoreInst *SI = dyn_cast<StoreInst>(it)) {
4778         // Try to match and vectorize a horizontal reduction.
4779         if (vectorizeRootInstruction(nullptr, SI->getValueOperand(), BB, R,
4780                                      TTI)) {
4781           Changed = true;
4782           it = BB->begin();
4783           e = BB->end();
4784           continue;
4785         }
4786       }
4787     }
4788 
4789     // Try to vectorize horizontal reductions feeding into a return.
4790     if (ReturnInst *RI = dyn_cast<ReturnInst>(it)) {
4791       if (RI->getNumOperands() != 0) {
4792         // Try to match and vectorize a horizontal reduction.
4793         if (vectorizeRootInstruction(nullptr, RI->getOperand(0), BB, R, TTI)) {
4794           Changed = true;
4795           it = BB->begin();
4796           e = BB->end();
4797           continue;
4798         }
4799       }
4800     }
4801 
4802     // Try to vectorize trees that start at compare instructions.
4803     if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
4804       if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
4805         Changed = true;
4806         // We would like to start over since some instructions are deleted
4807         // and the iterator may become invalid value.
4808         it = BB->begin();
4809         e = BB->end();
4810         continue;
4811       }
4812 
4813       for (int I = 0; I < 2; ++I) {
4814         if (vectorizeRootInstruction(nullptr, CI->getOperand(I), BB, R, TTI)) {
4815           Changed = true;
4816           // We would like to start over since some instructions are deleted
4817           // and the iterator may become invalid value.
4818           it = BB->begin();
4819           e = BB->end();
4820           break;
4821         }
4822       }
4823       continue;
4824     }
4825 
4826     // Try to vectorize trees that start at insertelement instructions.
4827     if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) {
4828       SmallVector<Value *, 16> BuildVector;
4829       SmallVector<Value *, 16> BuildVectorOpds;
4830       if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds))
4831         continue;
4832 
4833       // Vectorize starting with the build vector operands ignoring the
4834       // BuildVector instructions for the purpose of scheduling and user
4835       // extraction.
4836       if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) {
4837         Changed = true;
4838         it = BB->begin();
4839         e = BB->end();
4840       }
4841 
4842       continue;
4843     }
4844 
4845     // Try to vectorize trees that start at insertvalue instructions feeding into
4846     // a store.
4847     if (StoreInst *SI = dyn_cast<StoreInst>(it)) {
4848       if (InsertValueInst *LastInsertValue = dyn_cast<InsertValueInst>(SI->getValueOperand())) {
4849         const DataLayout &DL = BB->getModule()->getDataLayout();
4850         if (R.canMapToVector(SI->getValueOperand()->getType(), DL)) {
4851           SmallVector<Value *, 16> BuildVector;
4852           SmallVector<Value *, 16> BuildVectorOpds;
4853           if (!findBuildAggregate(LastInsertValue, BuildVector, BuildVectorOpds))
4854             continue;
4855 
4856           DEBUG(dbgs() << "SLP: store of array mappable to vector: " << *SI << "\n");
4857           if (tryToVectorizeList(BuildVectorOpds, R, BuildVector, false)) {
4858             Changed = true;
4859             it = BB->begin();
4860             e = BB->end();
4861           }
4862           continue;
4863         }
4864       }
4865     }
4866   }
4867 
4868   return Changed;
4869 }
4870 
4871 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
4872   auto Changed = false;
4873   for (auto &Entry : GEPs) {
4874 
4875     // If the getelementptr list has fewer than two elements, there's nothing
4876     // to do.
4877     if (Entry.second.size() < 2)
4878       continue;
4879 
4880     DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
4881                  << Entry.second.size() << ".\n");
4882 
4883     // We process the getelementptr list in chunks of 16 (like we do for
4884     // stores) to minimize compile-time.
4885     for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += 16) {
4886       auto Len = std::min<unsigned>(BE - BI, 16);
4887       auto GEPList = makeArrayRef(&Entry.second[BI], Len);
4888 
4889       // Initialize a set a candidate getelementptrs. Note that we use a
4890       // SetVector here to preserve program order. If the index computations
4891       // are vectorizable and begin with loads, we want to minimize the chance
4892       // of having to reorder them later.
4893       SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());
4894 
4895       // Some of the candidates may have already been vectorized after we
4896       // initially collected them. If so, the WeakVHs will have nullified the
4897       // values, so remove them from the set of candidates.
4898       Candidates.remove(nullptr);
4899 
4900       // Remove from the set of candidates all pairs of getelementptrs with
4901       // constant differences. Such getelementptrs are likely not good
4902       // candidates for vectorization in a bottom-up phase since one can be
4903       // computed from the other. We also ensure all candidate getelementptr
4904       // indices are unique.
4905       for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
4906         auto *GEPI = cast<GetElementPtrInst>(GEPList[I]);
4907         if (!Candidates.count(GEPI))
4908           continue;
4909         auto *SCEVI = SE->getSCEV(GEPList[I]);
4910         for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
4911           auto *GEPJ = cast<GetElementPtrInst>(GEPList[J]);
4912           auto *SCEVJ = SE->getSCEV(GEPList[J]);
4913           if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
4914             Candidates.remove(GEPList[I]);
4915             Candidates.remove(GEPList[J]);
4916           } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
4917             Candidates.remove(GEPList[J]);
4918           }
4919         }
4920       }
4921 
4922       // We break out of the above computation as soon as we know there are
4923       // fewer than two candidates remaining.
4924       if (Candidates.size() < 2)
4925         continue;
4926 
4927       // Add the single, non-constant index of each candidate to the bundle. We
4928       // ensured the indices met these constraints when we originally collected
4929       // the getelementptrs.
4930       SmallVector<Value *, 16> Bundle(Candidates.size());
4931       auto BundleIndex = 0u;
4932       for (auto *V : Candidates) {
4933         auto *GEP = cast<GetElementPtrInst>(V);
4934         auto *GEPIdx = GEP->idx_begin()->get();
4935         assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
4936         Bundle[BundleIndex++] = GEPIdx;
4937       }
4938 
4939       // Try and vectorize the indices. We are currently only interested in
4940       // gather-like cases of the form:
4941       //
4942       // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
4943       //
4944       // where the loads of "a", the loads of "b", and the subtractions can be
4945       // performed in parallel. It's likely that detecting this pattern in a
4946       // bottom-up phase will be simpler and less costly than building a
4947       // full-blown top-down phase beginning at the consecutive loads.
4948       Changed |= tryToVectorizeList(Bundle, R);
4949     }
4950   }
4951   return Changed;
4952 }
4953 
4954 bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
4955   bool Changed = false;
4956   // Attempt to sort and vectorize each of the store-groups.
4957   for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e;
4958        ++it) {
4959     if (it->second.size() < 2)
4960       continue;
4961 
4962     DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
4963           << it->second.size() << ".\n");
4964 
4965     // Process the stores in chunks of 16.
4966     // TODO: The limit of 16 inhibits greater vectorization factors.
4967     //       For example, AVX2 supports v32i8. Increasing this limit, however,
4968     //       may cause a significant compile-time increase.
4969     for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI+=16) {
4970       unsigned Len = std::min<unsigned>(CE - CI, 16);
4971       Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len), R);
4972     }
4973   }
4974   return Changed;
4975 }
4976 
4977 char SLPVectorizer::ID = 0;
4978 static const char lv_name[] = "SLP Vectorizer";
4979 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
4980 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
4981 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
4982 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
4983 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
4984 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
4985 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
4986 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)
4987 
4988 namespace llvm {
4989 Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
4990 }
4991