//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
                                  cl::desc("Run the SLP vectorization passes"));

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number"));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned>
MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden,
    cl::desc("Maximum SLP vectorization factor (0=unlimited)"));

static cl::opt<int>
MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden,
    cl::desc("Maximum depth of the lookup for consecutive stores."));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
    cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

// The maximum depth that the look-ahead score heuristic will explore.
// The higher this value, the higher the compilation time overhead.
static cl::opt<int> LookAheadMaxDepth(
    "slp-max-look-ahead-depth", cl::init(2), cl::Hidden,
    cl::desc("The maximum look-ahead depth for operand reordering scores"));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

static cl::opt<bool> EnableMSSAInSLPVectorizer(
    "enable-mssa-in-slp-vectorizer", cl::Hidden, cl::init(false),
    cl::desc("Enable MemorySSA for SLPVectorizer in new pass manager"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns True if the value is a constant (but not globals/constant
/// expressions).
static bool isConstant(Value *V) {
  return isa<Constant>(V) && !isa<ConstantExpr>(V) && !isa<GlobalValue>(V);
}

/// Checks if \p V is one of the vector-like instructions, i.e. undef, an
/// insertelement/extractelement with constant indices for a fixed vector type,
/// or an extractvalue instruction.
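/// A few illustrative cases (%v, %s, %n are hypothetical values):
///   %e = extractelement <4 x i32> %v, i32 1        ; constant index -> true
///   %i = insertelement <4 x i32> %v, i32 %s, i32 0 ; constant index -> true
///   %d = extractelement <4 x i32> %v, i32 %n       ; variable index -> false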
static bool isVectorLikeInstWithConstOps(Value *V) {
  if (!isa<InsertElementInst, ExtractElementInst>(V) &&
      !isa<ExtractValueInst, UndefValue>(V))
    return false;
  auto *I = dyn_cast<Instruction>(V);
  if (!I || isa<ExtractValueInst>(I))
    return true;
  if (!isa<FixedVectorType>(I->getOperand(0)->getType()))
    return false;
  if (isa<ExtractElementInst>(I))
    return isConstant(I->getOperand(1));
  assert(isa<InsertElementInst>(V) && "Expected only insertelement.");
  return isConstant(I->getOperand(2));
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  if (all_of(VL, isVectorLikeInstWithConstOps))
    return true;

  BasicBlock *BB = I0->getParent();
  for (int I = 1, E = VL.size(); I < E; I++) {
    auto *II = dyn_cast<Instruction>(VL[I]);
    if (!II)
      return false;

    if (BB != II->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants (but not
/// globals/constant expressions).
static bool allConstant(ArrayRef<Value *> VL) {
  // Constant expressions and globals can't be vectorized like normal
  // integer/FP constants.
  return all_of(VL, isConstant);
}

/// \returns True if all of the values in \p VL are identical or some of them
/// are UndefValue.
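/// For example (illustrative): {%a, undef, %a, %a} is a splat, while a list
/// consisting only of UndefValues is not (there is no non-undef value).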
static bool isSplat(ArrayRef<Value *> VL) {
  Value *FirstNonUndef = nullptr;
  for (Value *V : VL) {
    if (isa<UndefValue>(V))
      continue;
    if (!FirstNonUndef) {
      FirstNonUndef = V;
      continue;
    }
    if (V != FirstNonUndef)
      return false;
  }
  return FirstNonUndef != nullptr;
}

/// \returns True if \p I is commutative, handles CmpInst and BinaryOperator.
static bool isCommutative(Instruction *I) {
  if (auto *Cmp = dyn_cast<CmpInst>(I))
    return Cmp->isCommutative();
  if (auto *BO = dyn_cast<BinaryOperator>(I))
    return BO->isCommutative();
  // TODO: This should check for generic Instruction::isCommutative(), but
  //       we need to confirm that the caller code correctly handles Intrinsics
  //       for example (does not have 2 operands).
  return false;
}

/// Checks if the given value is actually an undefined constant vector.
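/// For example, <2 x i32> <i32 undef, i32 poison> is an undef vector, while
/// <2 x i32> <i32 undef, i32 0> is not.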
static bool isUndefVector(const Value *V) {
  if (isa<UndefValue>(V))
    return true;
  auto *C = dyn_cast<Constant>(V);
  if (!C)
    return false;
  if (!C->containsUndefOrPoisonElement())
    return false;
  auto *VecTy = dyn_cast<FixedVectorType>(C->getType());
  if (!VecTy)
    return false;
  for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I) {
    if (Constant *Elem = C->getAggregateElement(I))
      if (!isa<UndefValue>(Elem))
        return false;
  }
  return true;
}

/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> poison, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> poison, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul.
/// Mask will return the Shuffle Mask equivalent to the extracted elements.
/// TODO: Can we split off and reuse the shuffle mask detection from
/// TargetTransformInfo::getInstructionThroughput?
static Optional<TargetTransformInfo::ShuffleKind>
isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
  const auto *It =
      find_if(VL, [](Value *V) { return isa<ExtractElementInst>(V); });
  if (It == VL.end())
    return None;
  auto *EI0 = cast<ExtractElementInst>(*It);
  if (isa<ScalableVectorType>(EI0->getVectorOperandType()))
    return None;
  unsigned Size =
      cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, Select, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  Mask.assign(VL.size(), UndefMaskElem);
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    // Undef can be represented as an undef element in a vector.
    if (isa<UndefValue>(VL[I]))
      continue;
    auto *EI = cast<ExtractElementInst>(VL[I]);
    if (isa<ScalableVectorType>(EI->getVectorOperandType()))
      return None;
    auto *Vec = EI->getVectorOperand();
    // We can extractelement from undef or poison vector.
    if (isUndefVector(Vec))
      continue;
    // All vector operands must have the same number of vector elements.
    if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
      return None;
    if (isa<UndefValue>(EI->getIndexOperand()))
      continue;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size))
      continue;
    unsigned IntIdx = Idx->getValue().getZExtValue();
    Mask[I] = IntIdx;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
    if (!Vec1 || Vec1 == Vec) {
      Vec1 = Vec;
    } else if (!Vec2 || Vec2 == Vec) {
      Vec2 = Vec;
      Mask[I] += Size;
    } else {
      return None;
    }
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector, otherwise
  // we have a permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
  bool isAltShuffle() const { return AltOp != MainOp; }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace

/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}

/// \returns true if \p Opcode is allowed as part of the main/alternate
/// instruction for SLP vectorization.
///
/// An example of an unsupported opcode is SDIV, which can potentially cause UB
/// if the "shuffled out" lane would result in division by zero.
static bool isValidForAlternation(unsigned Opcode) {
  if (Instruction::isIntDivRem(Opcode))
    return false;

  return true;
}

static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex = 0);

/// Checks if the provided operands of 2 cmp instructions are compatible, i.e.
/// compatible instructions or constants, or just some other regular values.
static bool areCompatibleCmpOps(Value *BaseOp0, Value *BaseOp1, Value *Op0,
                                Value *Op1) {
  return (isConstant(BaseOp0) && isConstant(Op0)) ||
         (isConstant(BaseOp1) && isConstant(Op1)) ||
         (!isa<Instruction>(BaseOp0) && !isa<Instruction>(Op0) &&
          !isa<Instruction>(BaseOp1) && !isa<Instruction>(Op1)) ||
         getSameOpcode({BaseOp0, Op0}).getOpcode() ||
         getSameOpcode({BaseOp1, Op1}).getOpcode();
}

/// \returns analysis of the Instructions in \p VL described in
/// InstructionsState, i.e. the opcode with which we suppose the whole list
/// could be vectorized even if its structure is diverse.
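/// For example (illustrative): for {add, sub, add, sub} the main opcode is
/// add and the alternate opcode is sub, while for {add, sdiv, add, sdiv} the
/// analysis fails (getOpcode() == 0), since integer division is not valid for
/// alternation (a "shuffled out" lane could divide by zero).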
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  bool IsCmpOp = isa<CmpInst>(VL[BaseIndex]);
  CmpInst::Predicate BasePred =
      IsCmpOp ? cast<CmpInst>(VL[BaseIndex])->getPredicate()
              : CmpInst::BAD_ICMP_PREDICATE;
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
  // TODO - generalize to support all operators (types, calls etc.).
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
    if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
      if (InstOpcode == Opcode || InstOpcode == AltOpcode)
        continue;
      if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
          isValidForAlternation(Opcode)) {
        AltOpcode = InstOpcode;
        AltIndex = Cnt;
        continue;
      }
    } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
      Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
      Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        if (InstOpcode == Opcode || InstOpcode == AltOpcode)
          continue;
        if (Opcode == AltOpcode) {
          assert(isValidForAlternation(Opcode) &&
                 isValidForAlternation(InstOpcode) &&
                 "Cast isn't safe for alternation, logic needs to be updated!");
          AltOpcode = InstOpcode;
          AltIndex = Cnt;
          continue;
        }
      }
    } else if (IsCmpOp && isa<CmpInst>(VL[Cnt])) {
      auto *BaseInst = cast<Instruction>(VL[BaseIndex]);
      auto *Inst = cast<Instruction>(VL[Cnt]);
      Type *Ty0 = BaseInst->getOperand(0)->getType();
      Type *Ty1 = Inst->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        Value *BaseOp0 = BaseInst->getOperand(0);
        Value *BaseOp1 = BaseInst->getOperand(1);
        Value *Op0 = Inst->getOperand(0);
        Value *Op1 = Inst->getOperand(1);
        CmpInst::Predicate CurrentPred =
            cast<CmpInst>(VL[Cnt])->getPredicate();
        CmpInst::Predicate SwappedCurrentPred =
            CmpInst::getSwappedPredicate(CurrentPred);
        // Check for compatible operands. If the corresponding operands are not
        // compatible, we need to perform alternate vectorization.
        if (InstOpcode == Opcode) {
          if (BasePred == CurrentPred &&
              areCompatibleCmpOps(BaseOp0, BaseOp1, Op0, Op1))
            continue;
          if (BasePred == SwappedCurrentPred &&
              areCompatibleCmpOps(BaseOp0, BaseOp1, Op1, Op0))
            continue;
          if (E == 2 &&
              (BasePred == CurrentPred || BasePred == SwappedCurrentPred))
            continue;
          auto *AltInst = cast<CmpInst>(VL[AltIndex]);
          CmpInst::Predicate AltPred = AltInst->getPredicate();
          Value *AltOp0 = AltInst->getOperand(0);
          Value *AltOp1 = AltInst->getOperand(1);
          // Check if operands are compatible with alternate operands.
          if (AltPred == CurrentPred &&
              areCompatibleCmpOps(AltOp0, AltOp1, Op0, Op1))
            continue;
          if (AltPred == SwappedCurrentPred &&
              areCompatibleCmpOps(AltOp0, AltOp1, Op1, Op0))
            continue;
        }
        if (BaseIndex == AltIndex && BasePred != CurrentPred) {
          assert(isValidForAlternation(Opcode) &&
                 isValidForAlternation(InstOpcode) &&
                 "Cast isn't safe for alternation, logic needs to be updated!");
          AltIndex = Cnt;
          continue;
        }
        auto *AltInst = cast<CmpInst>(VL[AltIndex]);
        CmpInst::Predicate AltPred = AltInst->getPredicate();
        if (BasePred == CurrentPred || BasePred == SwappedCurrentPred ||
            AltPred == CurrentPred || AltPred == SwappedCurrentPred)
          continue;
      }
    } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
      continue;
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }

  return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
                           cast<Instruction>(VL[AltIndex]));
}

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns the index of the element extracted by an Extract{Value,Element}
/// instruction, or None if the index cannot be determined.
static Optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return None;
    return CI->getZExtValue();
  }
  ExtractValueInst *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return None;
  return *EI->idx_begin();
}

/// \returns True if the in-tree use also needs an extract. This refers to a
/// possible scalar operand in a vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
      if (hasVectorInstrinsicScalarOpd(ID, i))
        return (CI->getArgOperand(i) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

/// Shuffles \p Mask in accordance with the given \p SubMask.
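/// For example (illustrative): composing an existing Mask = {3,2,1,0} with
/// SubMask = {1,0,3,2} yields {2,3,0,1}, i.e. NewMask[I] == Mask[SubMask[I]];
/// out-of-range or undef submask elements stay UndefMaskElem.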
static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask) {
  if (SubMask.empty())
    return;
  if (Mask.empty()) {
    Mask.append(SubMask.begin(), SubMask.end());
    return;
  }
  SmallVector<int> NewMask(SubMask.size(), UndefMaskElem);
  int TermValue = std::min(Mask.size(), SubMask.size());
  for (int I = 0, E = SubMask.size(); I < E; ++I) {
    if (SubMask[I] >= TermValue || SubMask[I] == UndefMaskElem ||
        Mask[SubMask[I]] >= TermValue)
      continue;
    NewMask[I] = Mask[SubMask[I]];
  }
  Mask.swap(NewMask);
}

/// Order may have elements assigned a special value (size), which is out of
/// bounds. Such indices only appear in places which correspond to undef values
/// (see canReuseExtract for details) and are used to prevent undef values from
/// affecting the operand ordering.
/// The first loop below simply finds all unused indices and then the next loop
/// nest assigns these indices to the undef value positions.
/// In the example below, Order has two undef positions, which are assigned the
/// values 3 and 7 respectively:
/// before:  6 9 5 4 9 2 1 0
/// after:   6 3 5 4 7 2 1 0
static void fixupOrderingIndices(SmallVectorImpl<unsigned> &Order) {
  const unsigned Sz = Order.size();
  SmallBitVector UnusedIndices(Sz, /*t=*/true);
  SmallBitVector MaskedIndices(Sz);
  for (unsigned I = 0; I < Sz; ++I) {
    if (Order[I] < Sz)
      UnusedIndices.reset(Order[I]);
    else
      MaskedIndices.set(I);
  }
  if (MaskedIndices.none())
    return;
  assert(UnusedIndices.count() == MaskedIndices.count() &&
         "Non-synced masked/available indices.");
  int Idx = UnusedIndices.find_first();
  int MIdx = MaskedIndices.find_first();
  while (MIdx >= 0) {
    assert(Idx >= 0 && "Indices must be synced.");
    Order[MIdx] = Idx;
    Idx = UnusedIndices.find_next(Idx);
    MIdx = MaskedIndices.find_next(MIdx);
  }
}

namespace llvm {

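/// Computes the inverse of the permutation \p Indices into \p Mask, i.e.
/// Mask[Indices[I]] == I for every I. For example (illustrative), Indices =
/// {2,0,1} yields Mask = {1,2,0}.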
static void inversePermutation(ArrayRef<unsigned> Indices,
                               SmallVectorImpl<int> &Mask) {
  Mask.clear();
  const unsigned E = Indices.size();
  Mask.resize(E, UndefMaskElem);
  for (unsigned I = 0; I < E; ++I)
    Mask[Indices[I]] = I;
}

/// \returns the insertion index of an InsertElement or InsertValue
/// instruction, using \p Offset as the base offset for the index.
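/// For example (illustrative, assuming \p Offset is 0): for
///   %r = insertvalue [2 x [2 x i32]] %agg, i32 %v, 1, 0
/// the flattened index is 1 * 2 + 0 = 2.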
static Optional<unsigned> getInsertIndex(Value *InsertInst,
                                         unsigned Offset = 0) {
  int Index = Offset;
  if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
    if (auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2))) {
      auto *VT = cast<FixedVectorType>(IE->getType());
      if (CI->getValue().uge(VT->getNumElements()))
        return None;
      Index *= VT->getNumElements();
      Index += CI->getZExtValue();
      return Index;
    }
    return None;
  }

  auto *IV = cast<InsertValueInst>(InsertInst);
  Type *CurrentType = IV->getType();
  for (unsigned I : IV->indices()) {
    if (auto *ST = dyn_cast<StructType>(CurrentType)) {
      Index *= ST->getNumElements();
      CurrentType = ST->getElementType(I);
    } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
      Index *= AT->getNumElements();
      CurrentType = AT->getElementType();
    } else {
      return None;
    }
    Index += I;
  }
  return Index;
}

/// Reorders the list of scalars in accordance with the given \p Mask.
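/// For example (illustrative): with Mask = {1,0,3,2}, scalars {a,b,c,d}
/// become {b,a,d,c}, i.e. element I moves to position Mask[I].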
static void reorderScalars(SmallVectorImpl<Value *> &Scalars,
                           ArrayRef<int> Mask) {
  assert(!Mask.empty() && "Expected non-empty mask.");
  SmallVector<Value *> Prev(Scalars.size(),
                            UndefValue::get(Scalars.front()->getType()));
  Prev.swap(Scalars);
  for (unsigned I = 0, E = Prev.size(); I < E; ++I)
    if (Mask[I] != UndefMaskElem)
      Scalars[Mask[I]] = Prev[I];
}

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  using OrdersType = SmallVector<unsigned, 4>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          MemorySSA *MSSA, const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : BatchAA(*Aa), F(Func), SE(Se), TTI(Tti), TLI(TLi), LI(Li),
        DT(Dt), AC(AC), DB(DB), MSSA(MSSA), DL(DL), ORE(ORE),
        Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    //       data type rather than just register size. For example, x86 AVX has
    //       256-bit registers, but it does not support integer operations
    //       at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize =
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize();

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractvalue instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  InstructionCost getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Builds external uses of the vectorized scalars, i.e. the list of
  /// vectorized scalars to be extracted, their lanes and their scalar users.
  /// \p ExternallyUsedValues contains an additional list of external uses to
  /// handle vectorization of reductions.
  void
  buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {});

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
    InstrElementSize.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// Checks if the specified gather tree entry \p TE can be represented as a
  /// shuffled vector entry + (possibly) permutation with other gathers. It
  /// implements the checks only for possibly ordered scalars (Loads,
  /// ExtractElement, ExtractValue), which can be part of the graph.
  Optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE);

  /// Gets reordering data for the given tree entry. If the entry is vectorized
  /// - just return ReorderIndices, otherwise check if the scalars can be
  /// reordered and return the most optimal order.
  /// \param TopToBottom If true, include the order of vectorized stores and
  /// insertelement nodes, otherwise skip them.
  Optional<OrdersType> getReorderingData(const TreeEntry &TE, bool TopToBottom);

  /// Reorders the current graph to the most profitable order starting from the
  /// root node to the leaf nodes. The best order is chosen only from the nodes
  /// of the same size (vectorization factor). Smaller nodes are considered
  /// parts of a subgraph with a smaller VF and are reordered independently. We
  /// can do this because we still need to extend smaller nodes to the wider VF
  /// and we can merge the reordering shuffles with the widening shuffles.
  void reorderTopToBottom();

  /// Reorders the current graph to the most profitable order starting from the
  /// leaves to the root. This allows rotating small subgraphs, reducing the
  /// number of reshuffles when the leaf nodes use the same order: in that case
  /// we can merge the orders and just shuffle the user node instead of
  /// shuffling its operands. Moreover, even when the leaf nodes have different
  /// orders, this allows the reordering to sink through the graph closer to
  /// the root node, where it can be merged later during analysis.
  void reorderBottomToTop(bool IgnoreReorder = false);

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  unsigned getMinVF(unsigned Sz) const {
    return std::max(2U, getMinVecRegSize() / Sz);
  }

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
    unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
      MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
    return MaxVF ? MaxVF : UINT_MAX;
  }

  /// Check if a homogeneous aggregate is isomorphic to some VectorType.
  /// Accepts homogeneous multidimensional aggregate of scalars/vectors like
  /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
  /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable(bool ForReduction = false) const;

  /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  ///       may not be necessary.
  bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;

  /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  ///       may not be necessary.
  bool isLoadCombineCandidate() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }

  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
  };

  /// A helper data structure to hold the operands of a vector of instructions.
  /// This supports a fixed vector length for all operand vectors.
  class VLOperands {
    /// For each operand we need (i) the value, and (ii) the opcode that it
    /// would be attached to if the expression was in a left-linearized form.
    /// This is required to avoid illegal operand reordering.
    /// For example:
    /// \verbatim
    ///                         0 Op1
    ///                         |/
    /// Op1 Op2   Linearized    + Op2
    ///   \ /     ---------->   |/
    ///    -                    -
    ///
    /// Op1 - Op2            (0 + Op1) - Op2
    /// \endverbatim
    ///
    /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
    ///
    /// Another way to think of this is to track all the operations across the
    /// path from the operand all the way to the root of the tree and to
    /// calculate the operation that corresponds to this path. For example, the
    /// path from Op2 to the root crosses the RHS of the '-', therefore the
    /// corresponding operation is a '-' (which matches the one in the
    /// linearized tree, as shown above).
    ///
    /// For lack of a better term, we refer to this operation as Accumulated
    /// Path Operation (APO).
    struct OperandData {
      OperandData() = default;
      OperandData(Value *V, bool APO, bool IsUsed)
          : V(V), APO(APO), IsUsed(IsUsed) {}
      /// The operand value.
      Value *V = nullptr;
      /// TreeEntries only allow a single opcode, or an alternate sequence of
      /// them (e.g., +, -). Therefore, we can safely use a boolean value for
      /// the APO. It is set to 'true' if 'V' is attached to an inverse
      /// operation in the left-linearized form (e.g., Sub/Div), and 'false'
      /// otherwise (e.g., Add/Mul).
      bool APO = false;
      /// Helper data for the reordering function.
      bool IsUsed = false;
    };

    /// During operand reordering, we are trying to select the operand in each
    /// lane that best matches the operand in the neighboring lane. Our
    /// selection is based on the type of value we are looking for. For example,
    /// if the neighboring lane has a load, we need to look for a load that is
    /// accessing a consecutive address. These strategies are summarized in the
    /// 'ReorderingMode' enumerator.
    enum class ReorderingMode {
      Load,     ///< Matching loads to consecutive memory addresses
      Opcode,   ///< Matching instructions based on opcode (same or alternate)
      Constant, ///< Matching constants
      Splat,    ///< Matching the same instruction multiple times (broadcast)
      Failed,   ///< We failed to create a vectorizable group
    };

    using OperandDataVec = SmallVector<OperandData, 2>;

    /// A vector of operand vectors.
    SmallVector<OperandDataVec, 4> OpsVec;

    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;

    /// \returns the operand data at \p OpIdx and \p Lane.
    OperandData &getData(unsigned OpIdx, unsigned Lane) {
      return OpsVec[OpIdx][Lane];
    }

    /// \returns the operand data at \p OpIdx and \p Lane. Const version.
    const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
      return OpsVec[OpIdx][Lane];
    }

    /// Clears the used flag for all entries.
    void clearUsed() {
      for (unsigned OpIdx = 0, NumOperands = getNumOperands();
           OpIdx != NumOperands; ++OpIdx)
        for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
             ++Lane)
          OpsVec[OpIdx][Lane].IsUsed = false;
    }

    /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
    void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
      std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
    }

    // The hard-coded scores listed here are not very important, though they
    // should be higher for better matches to improve the resulting cost. When
    // computing the scores of matching one sub-tree with another, we are
    // basically counting the number of values that are matching. So even if all
    // scores are set to 1, we would still get a decent matching result.
    // However, sometimes we have to break ties. For example, we may have to
    // choose between matching loads vs matching opcodes. This is what these
    // scores are helping us with: they provide the order of preference. Also,
    // this is important if the scalar is externally used or used in another
    // tree entry node in a different lane.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 4;
    /// Loads from reversed memory addresses, e.g. load(A[i+1]), load(A[i]).
    static const int ScoreReversedLoads = 3;
    /// ExtractElementInst from same vector and consecutive indexes.
    static const int ScoreConsecutiveExtracts = 4;
    /// ExtractElementInst from same vector and reversed indices.
    static const int ScoreReversedExtracts = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g., add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// Score if all users are vectorized.
    static const int ScoreAllUserVectorized = 1;

    /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
    /// Also, checks if \p V1 and \p V2 are compatible with instructions in \p
    /// MainAltOps.
    static int getShallowScore(Value *V1, Value *V2, const DataLayout &DL,
                               ScalarEvolution &SE, int NumLanes,
                               ArrayRef<Value *> MainAltOps) {
      if (V1 == V2)
        return VLOperands::ScoreSplat;

      auto *LI1 = dyn_cast<LoadInst>(V1);
      auto *LI2 = dyn_cast<LoadInst>(V2);
      if (LI1 && LI2) {
        if (LI1->getParent() != LI2->getParent())
          return VLOperands::ScoreFail;

        Optional<int> Dist = getPointersDiff(
            LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
            LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
        if (!Dist || *Dist == 0)
          return VLOperands::ScoreFail;
        // The distance is too large - still may be profitable to use masked
        // loads/gathers.
        if (std::abs(*Dist) > NumLanes / 2)
          return VLOperands::ScoreAltOpcodes;
        // This still will detect consecutive loads, but we might have "holes"
        // in some cases. It is ok for non-power-2 vectorization and may produce
        // better results. It should not affect current vectorization.
        return (*Dist > 0) ? VLOperands::ScoreConsecutiveLoads
                           : VLOperands::ScoreReversedLoads;
      }

      auto *C1 = dyn_cast<Constant>(V1);
      auto *C2 = dyn_cast<Constant>(V2);
      if (C1 && C2)
        return VLOperands::ScoreConstants;

      // Extracts from consecutive indices of the same vector score better, as
      // the extracts could be optimized away.
      Value *EV1;
      ConstantInt *Ex1Idx;
      if (match(V1, m_ExtractElt(m_Value(EV1), m_ConstantInt(Ex1Idx)))) {
        // Undefs are always profitable for extractelements.
        if (isa<UndefValue>(V2))
          return VLOperands::ScoreConsecutiveExtracts;
        Value *EV2 = nullptr;
        ConstantInt *Ex2Idx = nullptr;
        if (match(V2,
                  m_ExtractElt(m_Value(EV2), m_CombineOr(m_ConstantInt(Ex2Idx),
                                                         m_Undef())))) {
          // Undefs are always profitable for extractelements.
          if (!Ex2Idx)
            return VLOperands::ScoreConsecutiveExtracts;
          if (isUndefVector(EV2) && EV2->getType() == EV1->getType())
            return VLOperands::ScoreConsecutiveExtracts;
          if (EV2 == EV1) {
            int Idx1 = Ex1Idx->getZExtValue();
            int Idx2 = Ex2Idx->getZExtValue();
            int Dist = Idx2 - Idx1;
            if (std::abs(Dist) == 0)
              return VLOperands::ScoreSplat;
            // The distance is too large - still may be profitable to use
            // shuffles.
            if (std::abs(Dist) > NumLanes / 2)
              return VLOperands::ScoreSameOpcode;
            return (Dist > 0) ? VLOperands::ScoreConsecutiveExtracts
                              : VLOperands::ScoreReversedExtracts;
          }
          return VLOperands::ScoreAltOpcodes;
        }
        return VLOperands::ScoreFail;
      }

      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (I1 && I2) {
        if (I1->getParent() != I2->getParent())
          return VLOperands::ScoreFail;
        SmallVector<Value *, 4> Ops(MainAltOps.begin(), MainAltOps.end());
        Ops.push_back(I1);
        Ops.push_back(I2);
        InstructionsState S = getSameOpcode(Ops);
        // Note: Only consider instructions with <= 2 operands to avoid
        // complexity explosion.
        if (S.getOpcode() &&
            (S.MainOp->getNumOperands() <= 2 || !MainAltOps.empty() ||
             !S.isAltShuffle()) &&
            all_of(Ops, [&S](Value *V) {
              return cast<Instruction>(V)->getNumOperands() ==
                     S.MainOp->getNumOperands();
            }))
          return S.isAltShuffle() ? VLOperands::ScoreAltOpcodes
                                  : VLOperands::ScoreSameOpcode;
      }

      if (isa<UndefValue>(V2))
        return VLOperands::ScoreUndef;

      return VLOperands::ScoreFail;
    }

    /// \param Lane lane of the operands under analysis.
    /// \param OpIdx operand index in lane \p Lane for which we're looking for
    /// the best candidate.
    /// \param Idx operand index of the current candidate value.
    /// \returns The additional score due to possible broadcasting of the
    /// elements in the lane. It is more profitable to have power-of-2 unique
    /// elements in the lane, as they will be vectorized with higher probability
    /// after removing duplicates. Currently the SLP vectorizer supports only
    /// vectorization of a power-of-2 number of unique scalars.
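    /// For example (illustrative): if the other lanes already hold 3 unique
    /// instructions, a candidate that brings the unique count to 4 (a power
    /// of 2, no padding wasted) scores higher than one that leaves it at 3
    /// (padded to 4).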
1224     int getSplatScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
1225       Value *IdxLaneV = getData(Idx, Lane).V;
1226       if (!isa<Instruction>(IdxLaneV) || IdxLaneV == getData(OpIdx, Lane).V)
1227         return 0;
1228       SmallPtrSet<Value *, 4> Uniques;
1229       for (unsigned Ln = 0, E = getNumLanes(); Ln < E; ++Ln) {
1230         if (Ln == Lane)
1231           continue;
1232         Value *OpIdxLnV = getData(OpIdx, Ln).V;
1233         if (!isa<Instruction>(OpIdxLnV))
1234           return 0;
1235         Uniques.insert(OpIdxLnV);
1236       }
1237       int UniquesCount = Uniques.size();
1238       int UniquesCntWithIdxLaneV =
1239           Uniques.contains(IdxLaneV) ? UniquesCount : UniquesCount + 1;
1240       Value *OpIdxLaneV = getData(OpIdx, Lane).V;
1241       int UniquesCntWithOpIdxLaneV =
1242           Uniques.contains(OpIdxLaneV) ? UniquesCount : UniquesCount + 1;
1243       if (UniquesCntWithIdxLaneV == UniquesCntWithOpIdxLaneV)
1244         return 0;
1245       return (PowerOf2Ceil(UniquesCntWithOpIdxLaneV) -
1246               UniquesCntWithOpIdxLaneV) -
1247              (PowerOf2Ceil(UniquesCntWithIdxLaneV) - UniquesCntWithIdxLaneV);
1248     }
1249 
1250     /// \param Lane lane of the operands under analysis.
1251     /// \param OpIdx operand index in \p Lane lane we're looking the best
1252     /// candidate for.
1253     /// \param Idx operand index of the current candidate value.
1254     /// \returns The additional score for the scalar which users are all
1255     /// vectorized.
1256     int getExternalUseScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
1257       Value *IdxLaneV = getData(Idx, Lane).V;
1258       Value *OpIdxLaneV = getData(OpIdx, Lane).V;
1259       // Do not care about number of uses for vector-like instructions
1260       // (extractelement/extractvalue with constant indices), they are extracts
1261       // themselves and already externally used. Vectorization of such
1262       // instructions does not add extra extractelement instruction, just may
1263       // remove it.
1264       if (isVectorLikeInstWithConstOps(IdxLaneV) &&
1265           isVectorLikeInstWithConstOps(OpIdxLaneV))
1266         return VLOperands::ScoreAllUserVectorized;
1267       auto *IdxLaneI = dyn_cast<Instruction>(IdxLaneV);
1268       if (!IdxLaneI || !isa<Instruction>(OpIdxLaneV))
1269         return 0;
1270       return R.areAllUsersVectorized(IdxLaneI, None)
1271                  ? VLOperands::ScoreAllUserVectorized
1272                  : 0;
1273     }
1274 
1275     /// Go through the operands of \p LHS and \p RHS recursively until \p
1276     /// MaxLevel, and return the cummulative score. For example:
1277     /// \verbatim
1278     ///  A[0]  B[0]  A[1]  B[1]  C[0] D[0]  B[1] A[1]
1279     ///     \ /         \ /         \ /        \ /
1280     ///      +           +           +          +
1281     ///     G1          G2          G3         G4
1282     /// \endverbatim
1283     /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
1284     /// each level recursively, accumulating the score. It starts from matching
1285     /// the additions at level 0, then moves on to the loads (level 1). The
1286     /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
1287     /// {B[0],B[1]} match with VLOperands::ScoreConsecutiveLoads, while
1288     /// {A[0],C[0]} has a score of VLOperands::ScoreFail.
1289     /// Please note that the order of the operands does not matter, as we
1290     /// evaluate the score of all profitable combinations of operands. In
1291     /// other words the score of G1 and G4 is the same as G1 and G2. This
1292     /// heuristic is based on ideas described in:
1293     ///   Look-ahead SLP: Auto-vectorization in the presence of commutative
1294     ///   operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
1295     ///   Luís F. W. Góes
1296     int getScoreAtLevelRec(Value *LHS, Value *RHS, int CurrLevel, int MaxLevel,
1297                            ArrayRef<Value *> MainAltOps) {
1298 
      // Get the shallow score of LHS and RHS.
1300       int ShallowScoreAtThisLevel =
1301           getShallowScore(LHS, RHS, DL, SE, getNumLanes(), MainAltOps);
1302 
      // If we reached MaxLevel,
      //  or if LHS and RHS are not instructions,
      //  or if they are SPLAT,
      //  or if they are not consecutive,
      //  or if it is already profitable to vectorize them as loads or
      //  extractelements, early return the current score.
1309       auto *I1 = dyn_cast<Instruction>(LHS);
1310       auto *I2 = dyn_cast<Instruction>(RHS);
1311       if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
1312           ShallowScoreAtThisLevel == VLOperands::ScoreFail ||
1313           (((isa<LoadInst>(I1) && isa<LoadInst>(I2)) ||
1314             (I1->getNumOperands() > 2 && I2->getNumOperands() > 2) ||
1315             (isa<ExtractElementInst>(I1) && isa<ExtractElementInst>(I2))) &&
1316            ShallowScoreAtThisLevel))
1317         return ShallowScoreAtThisLevel;
1318       assert(I1 && I2 && "Should have early exited.");
1319 
1320       // Contains the I2 operand indexes that got matched with I1 operands.
1321       SmallSet<unsigned, 4> Op2Used;
1322 
1323       // Recursion towards the operands of I1 and I2. We are trying all possible
1324       // operand pairs, and keeping track of the best score.
1325       for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
1326            OpIdx1 != NumOperands1; ++OpIdx1) {
        // Try to pair the operand at OpIdx1 with the best operand of I2.
1328         int MaxTmpScore = 0;
1329         unsigned MaxOpIdx2 = 0;
1330         bool FoundBest = false;
1331         // If I2 is commutative try all combinations.
1332         unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
1333         unsigned ToIdx = isCommutative(I2)
1334                              ? I2->getNumOperands()
1335                              : std::min(I2->getNumOperands(), OpIdx1 + 1);
1336         assert(FromIdx <= ToIdx && "Bad index");
1337         for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
1338           // Skip operands already paired with OpIdx1.
1339           if (Op2Used.count(OpIdx2))
1340             continue;
          // Recursively calculate the score at each level.
1342           int TmpScore =
1343               getScoreAtLevelRec(I1->getOperand(OpIdx1), I2->getOperand(OpIdx2),
1344                                  CurrLevel + 1, MaxLevel, None);
1345           // Look for the best score.
1346           if (TmpScore > VLOperands::ScoreFail && TmpScore > MaxTmpScore) {
1347             MaxTmpScore = TmpScore;
1348             MaxOpIdx2 = OpIdx2;
1349             FoundBest = true;
1350           }
1351         }
1352         if (FoundBest) {
1353           // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
1354           Op2Used.insert(MaxOpIdx2);
1355           ShallowScoreAtThisLevel += MaxTmpScore;
1356         }
1357       }
1358       return ShallowScoreAtThisLevel;
1359     }
1360 
    /// Score scaling factor for fully compatible instructions but with a
    /// different number of external uses. Allows better selection of the
    /// instructions with fewer external uses.
1364     static const int ScoreScaleFactor = 10;
1365 
    /// \returns the look-ahead score, which tells us how much the sub-trees
1367     /// rooted at \p LHS and \p RHS match, the more they match the higher the
1368     /// score. This helps break ties in an informed way when we cannot decide on
1369     /// the order of the operands by just considering the immediate
1370     /// predecessors.
1371     int getLookAheadScore(Value *LHS, Value *RHS, ArrayRef<Value *> MainAltOps,
1372                           int Lane, unsigned OpIdx, unsigned Idx,
1373                           bool &IsUsed) {
1374       int Score =
1375           getScoreAtLevelRec(LHS, RHS, 1, LookAheadMaxDepth, MainAltOps);
1376       if (Score) {
1377         int SplatScore = getSplatScore(Lane, OpIdx, Idx);
1378         if (Score <= -SplatScore) {
1379           // Set the minimum score for splat-like sequence to avoid setting
1380           // failed state.
1381           Score = 1;
1382         } else {
1383           Score += SplatScore;
          // Scale the score to distinguish between operands that are merely
          // different and operands that are similar but differ in whether all
          // their uses are vectorized. In general this does not affect the
          // selection of the best compatible operand, it just allows us to
          // prefer the operand whose uses are all vectorized.
1389           Score *= ScoreScaleFactor;
1390           Score += getExternalUseScore(Lane, OpIdx, Idx);
1391           IsUsed = true;
1392         }
1393       }
1394       return Score;
1395     }
1396 
    /// Best defined scores per lane between the passes. Used to choose the
1398     /// best operand (with the highest score) between the passes.
1399     /// The key - {Operand Index, Lane}.
1400     /// The value - the best score between the passes for the lane and the
1401     /// operand.
1402     SmallDenseMap<std::pair<unsigned, unsigned>, unsigned, 8>
1403         BestScoresPerLanes;
1404 
    /// Search all operands in Ops[*][Lane] for the one that best matches
    /// Ops[OpIdx][LastLane] and return its operand index.
    /// If no good match can be found, return None.
1408     Optional<unsigned> getBestOperand(unsigned OpIdx, int Lane, int LastLane,
1409                                       ArrayRef<ReorderingMode> ReorderingModes,
1410                                       ArrayRef<Value *> MainAltOps) {
1411       unsigned NumOperands = getNumOperands();
1412 
1413       // The operand of the previous lane at OpIdx.
1414       Value *OpLastLane = getData(OpIdx, LastLane).V;
1415 
1416       // Our strategy mode for OpIdx.
1417       ReorderingMode RMode = ReorderingModes[OpIdx];
1418       if (RMode == ReorderingMode::Failed)
1419         return None;
1420 
1421       // The linearized opcode of the operand at OpIdx, Lane.
1422       bool OpIdxAPO = getData(OpIdx, Lane).APO;
1423 
1424       // The best operand index and its score.
1425       // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
1426       // are using the score to differentiate between the two.
1427       struct BestOpData {
1428         Optional<unsigned> Idx = None;
1429         unsigned Score = 0;
1430       } BestOp;
1431       BestOp.Score =
1432           BestScoresPerLanes.try_emplace(std::make_pair(OpIdx, Lane), 0)
1433               .first->second;
1434 
      // Track if the operand must be marked as used. If the operand is set to
      // Score 1 explicitly (because of a non-power-of-2 number of unique
      // scalars), we may want to reestimate the operands again on the
      // following iterations.
1438       bool IsUsed =
1439           RMode == ReorderingMode::Splat || RMode == ReorderingMode::Constant;
1440       // Iterate through all unused operands and look for the best.
1441       for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
1442         // Get the operand at Idx and Lane.
1443         OperandData &OpData = getData(Idx, Lane);
1444         Value *Op = OpData.V;
1445         bool OpAPO = OpData.APO;
1446 
1447         // Skip already selected operands.
1448         if (OpData.IsUsed)
1449           continue;
1450 
1451         // Skip if we are trying to move the operand to a position with a
1452         // different opcode in the linearized tree form. This would break the
1453         // semantics.
1454         if (OpAPO != OpIdxAPO)
1455           continue;
1456 
1457         // Look for an operand that matches the current mode.
1458         switch (RMode) {
1459         case ReorderingMode::Load:
1460         case ReorderingMode::Constant:
1461         case ReorderingMode::Opcode: {
1462           bool LeftToRight = Lane > LastLane;
1463           Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
1464           Value *OpRight = (LeftToRight) ? Op : OpLastLane;
1465           int Score = getLookAheadScore(OpLeft, OpRight, MainAltOps, Lane,
1466                                         OpIdx, Idx, IsUsed);
1467           if (Score > static_cast<int>(BestOp.Score)) {
1468             BestOp.Idx = Idx;
1469             BestOp.Score = Score;
1470             BestScoresPerLanes[std::make_pair(OpIdx, Lane)] = Score;
1471           }
1472           break;
1473         }
1474         case ReorderingMode::Splat:
1475           if (Op == OpLastLane)
1476             BestOp.Idx = Idx;
1477           break;
1478         case ReorderingMode::Failed:
1479           llvm_unreachable("Not expected Failed reordering mode.");
1480         }
1481       }
1482 
1483       if (BestOp.Idx) {
1484         getData(BestOp.Idx.getValue(), Lane).IsUsed = IsUsed;
1485         return BestOp.Idx;
1486       }
1487       // If we could not find a good match return None.
1488       return None;
1489     }
1490 
    /// Helper for reorder().
    /// \returns the lane that we should start reordering from. This is the one
    /// which has the least number of operands that can freely move about, or
    /// is the least profitable because it already has the most optimal set of
    /// operands.
1495     unsigned getBestLaneToStartReordering() const {
1496       unsigned Min = UINT_MAX;
1497       unsigned SameOpNumber = 0;
      // std::pair<unsigned, unsigned> is used to implement a simple voting
      // algorithm and choose the lane with the least number of operands that
      // can freely move about, or is less profitable because it already has
      // the most optimal set of operands. The first unsigned is a counter for
      // voting, the second unsigned is the counter of lanes with instructions
      // with same/alternate opcodes and same parent basic block.
1504       MapVector<unsigned, std::pair<unsigned, unsigned>> HashMap;
1505       // Try to be closer to the original results, if we have multiple lanes
1506       // with same cost. If 2 lanes have the same cost, use the one with the
1507       // lowest index.
1508       for (int I = getNumLanes(); I > 0; --I) {
1509         unsigned Lane = I - 1;
1510         OperandsOrderData NumFreeOpsHash =
1511             getMaxNumOperandsThatCanBeReordered(Lane);
1512         // Compare the number of operands that can move and choose the one with
1513         // the least number.
1514         if (NumFreeOpsHash.NumOfAPOs < Min) {
1515           Min = NumFreeOpsHash.NumOfAPOs;
1516           SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent;
1517           HashMap.clear();
1518           HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
1519         } else if (NumFreeOpsHash.NumOfAPOs == Min &&
1520                    NumFreeOpsHash.NumOpsWithSameOpcodeParent < SameOpNumber) {
1521           // Select the most optimal lane in terms of number of operands that
1522           // should be moved around.
1523           SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent;
1524           HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
1525         } else if (NumFreeOpsHash.NumOfAPOs == Min &&
1526                    NumFreeOpsHash.NumOpsWithSameOpcodeParent == SameOpNumber) {
1527           auto It = HashMap.find(NumFreeOpsHash.Hash);
1528           if (It == HashMap.end())
1529             HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
1530           else
1531             ++It->second.first;
1532         }
1533       }
1534       // Select the lane with the minimum counter.
1535       unsigned BestLane = 0;
1536       unsigned CntMin = UINT_MAX;
1537       for (const auto &Data : reverse(HashMap)) {
1538         if (Data.second.first < CntMin) {
1539           CntMin = Data.second.first;
1540           BestLane = Data.second.second;
1541         }
1542       }
1543       return BestLane;
1544     }
1545 
1546     /// Data structure that helps to reorder operands.
1547     struct OperandsOrderData {
1548       /// The best number of operands with the same APOs, which can be
1549       /// reordered.
1550       unsigned NumOfAPOs = UINT_MAX;
1551       /// Number of operands with the same/alternate instruction opcode and
1552       /// parent.
1553       unsigned NumOpsWithSameOpcodeParent = 0;
      /// Hash for the actual operand ordering.
      /// It encodes each operand's position id and opcode value and is used in
      /// the voting mechanism to find the lane with the least number of
      /// operands that can freely move about, or is less profitable because it
      /// already has the most optimal set of operands. A SmallVector<unsigned>
      /// could be used instead, but the hash code is faster and requires less
      /// memory.
1561       unsigned Hash = 0;
1562     };
1563     /// \returns the maximum number of operands that are allowed to be reordered
    /// for \p Lane and the number of compatible instructions (with the same
1565     /// parent/opcode). This is used as a heuristic for selecting the first lane
1566     /// to start operand reordering.
1567     OperandsOrderData getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
1568       unsigned CntTrue = 0;
1569       unsigned NumOperands = getNumOperands();
1570       // Operands with the same APO can be reordered. We therefore need to count
1571       // how many of them we have for each APO, like this: Cnt[APO] = x.
1572       // Since we only have two APOs, namely true and false, we can avoid using
1573       // a map. Instead we can simply count the number of operands that
1574       // correspond to one of them (in this case the 'true' APO), and calculate
1575       // the other by subtracting it from the total number of operands.
1576       // Operands with the same instruction opcode and parent are more
1577       // profitable since we don't need to move them in many cases, with a high
1578       // probability such lane already can be vectorized effectively.
1579       bool AllUndefs = true;
1580       unsigned NumOpsWithSameOpcodeParent = 0;
1581       Instruction *OpcodeI = nullptr;
1582       BasicBlock *Parent = nullptr;
1583       unsigned Hash = 0;
1584       for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1585         const OperandData &OpData = getData(OpIdx, Lane);
1586         if (OpData.APO)
1587           ++CntTrue;
1588         // Use Boyer-Moore majority voting for finding the majority opcode and
1589         // the number of times it occurs.
1590         if (auto *I = dyn_cast<Instruction>(OpData.V)) {
1591           if (!OpcodeI || !getSameOpcode({OpcodeI, I}).getOpcode() ||
1592               I->getParent() != Parent) {
1593             if (NumOpsWithSameOpcodeParent == 0) {
1594               NumOpsWithSameOpcodeParent = 1;
1595               OpcodeI = I;
1596               Parent = I->getParent();
1597             } else {
1598               --NumOpsWithSameOpcodeParent;
1599             }
1600           } else {
1601             ++NumOpsWithSameOpcodeParent;
1602           }
1603         }
1604         Hash = hash_combine(
1605             Hash, hash_value((OpIdx + 1) * (OpData.V->getValueID() + 1)));
1606         AllUndefs = AllUndefs && isa<UndefValue>(OpData.V);
1607       }
1608       if (AllUndefs)
1609         return {};
1610       OperandsOrderData Data;
1611       Data.NumOfAPOs = std::max(CntTrue, NumOperands - CntTrue);
1612       Data.NumOpsWithSameOpcodeParent = NumOpsWithSameOpcodeParent;
1613       Data.Hash = Hash;
1614       return Data;
1615     }
1616 
1617     /// Go through the instructions in VL and append their operands.
1618     void appendOperandsOfVL(ArrayRef<Value *> VL) {
1619       assert(!VL.empty() && "Bad VL");
1620       assert((empty() || VL.size() == getNumLanes()) &&
1621              "Expected same number of lanes");
1622       assert(isa<Instruction>(VL[0]) && "Expected instruction");
1623       unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
1624       OpsVec.resize(NumOperands);
1625       unsigned NumLanes = VL.size();
1626       for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1627         OpsVec[OpIdx].resize(NumLanes);
1628         for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1629           assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
1630           // Our tree has just 3 nodes: the root and two operands.
1631           // It is therefore trivial to get the APO. We only need to check the
1632           // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
1633           // RHS operand. The LHS operand of both add and sub is never attached
          // to an inverse operation in the linearized form, therefore its APO
          // is false. The RHS APO is true only if VL[Lane] is an inverse
          // operation.
1636 
1637           // Since operand reordering is performed on groups of commutative
1638           // operations or alternating sequences (e.g., +, -), we can safely
1639           // tell the inverse operations by checking commutativity.
1640           bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
1641           bool APO = (OpIdx == 0) ? false : IsInverseOperation;
1642           OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
1643                                  APO, false};
1644         }
1645       }
1646     }
1647 
1648     /// \returns the number of operands.
1649     unsigned getNumOperands() const { return OpsVec.size(); }
1650 
1651     /// \returns the number of lanes.
1652     unsigned getNumLanes() const { return OpsVec[0].size(); }
1653 
1654     /// \returns the operand value at \p OpIdx and \p Lane.
1655     Value *getValue(unsigned OpIdx, unsigned Lane) const {
1656       return getData(OpIdx, Lane).V;
1657     }
1658 
1659     /// \returns true if the data structure is empty.
1660     bool empty() const { return OpsVec.empty(); }
1661 
1662     /// Clears the data.
1663     void clear() { OpsVec.clear(); }
1664 
    /// \returns true if there are enough operands identical to \p Op to fill
1666     /// the whole vector.
1667     /// Note: This modifies the 'IsUsed' flag, so a cleanUsed() must follow.
1668     bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
1669       bool OpAPO = getData(OpIdx, Lane).APO;
1670       for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
1671         if (Ln == Lane)
1672           continue;
1673         // This is set to true if we found a candidate for broadcast at Lane.
1674         bool FoundCandidate = false;
1675         for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
1676           OperandData &Data = getData(OpI, Ln);
1677           if (Data.APO != OpAPO || Data.IsUsed)
1678             continue;
1679           if (Data.V == Op) {
1680             FoundCandidate = true;
1681             Data.IsUsed = true;
1682             break;
1683           }
1684         }
1685         if (!FoundCandidate)
1686           return false;
1687       }
1688       return true;
1689     }
1690 
1691   public:
1692     /// Initialize with all the operands of the instruction vector \p RootVL.
1693     VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
1694                ScalarEvolution &SE, const BoUpSLP &R)
1695         : DL(DL), SE(SE), R(R) {
1696       // Append all the operands of RootVL.
1697       appendOperandsOfVL(RootVL);
1698     }
1699 
    /// \returns a value vector with the operands across all lanes for the
    /// operand at \p OpIdx.
1702     ValueList getVL(unsigned OpIdx) const {
1703       ValueList OpVL(OpsVec[OpIdx].size());
1704       assert(OpsVec[OpIdx].size() == getNumLanes() &&
1705              "Expected same num of lanes across all operands");
1706       for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
1707         OpVL[Lane] = OpsVec[OpIdx][Lane].V;
1708       return OpVL;
1709     }
1710 
    /// Performs operand reordering for 2 or more operands.
    /// The operands in OpsVec[OpIdx][Lane] are reordered in place, so that
    /// matching operands end up at the same operand index across all lanes.
1714     void reorder() {
1715       unsigned NumOperands = getNumOperands();
1716       unsigned NumLanes = getNumLanes();
1717       // Each operand has its own mode. We are using this mode to help us select
1718       // the instructions for each lane, so that they match best with the ones
1719       // we have selected so far.
1720       SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);
1721 
1722       // This is a greedy single-pass algorithm. We are going over each lane
1723       // once and deciding on the best order right away with no back-tracking.
1724       // However, in order to increase its effectiveness, we start with the lane
1725       // that has operands that can move the least. For example, given the
1726       // following lanes:
1727       //  Lane 0 : A[0] = B[0] + C[0]   // Visited 3rd
1728       //  Lane 1 : A[1] = C[1] - B[1]   // Visited 1st
1729       //  Lane 2 : A[2] = B[2] + C[2]   // Visited 2nd
1730       //  Lane 3 : A[3] = C[3] - B[3]   // Visited 4th
1731       // we will start at Lane 1, since the operands of the subtraction cannot
1732       // be reordered. Then we will visit the rest of the lanes in a circular
      // fashion. That is, Lane 2, then Lane 0, and finally Lane 3.
1734 
1735       // Find the first lane that we will start our search from.
1736       unsigned FirstLane = getBestLaneToStartReordering();
1737 
1738       // Initialize the modes.
1739       for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1740         Value *OpLane0 = getValue(OpIdx, FirstLane);
1741         // Keep track if we have instructions with all the same opcode on one
1742         // side.
1743         if (isa<LoadInst>(OpLane0))
1744           ReorderingModes[OpIdx] = ReorderingMode::Load;
1745         else if (isa<Instruction>(OpLane0)) {
1746           // Check if OpLane0 should be broadcast.
1747           if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
1748             ReorderingModes[OpIdx] = ReorderingMode::Splat;
1749           else
1750             ReorderingModes[OpIdx] = ReorderingMode::Opcode;
1751         }
1752         else if (isa<Constant>(OpLane0))
1753           ReorderingModes[OpIdx] = ReorderingMode::Constant;
1754         else if (isa<Argument>(OpLane0))
1755           // Our best hope is a Splat. It may save some cost in some cases.
1756           ReorderingModes[OpIdx] = ReorderingMode::Splat;
1757         else
1758           // NOTE: This should be unreachable.
1759           ReorderingModes[OpIdx] = ReorderingMode::Failed;
1760       }
1761 
      // Check that we don't have the same operands. No need to reorder if the
      // operands are just a perfect diamond or shuffled diamond match. Do not
      // skip this check for possible broadcasts or a non-power-of-2 number of
      // scalars (just for now).
1766       auto &&SkipReordering = [this]() {
1767         SmallPtrSet<Value *, 4> UniqueValues;
1768         ArrayRef<OperandData> Op0 = OpsVec.front();
1769         for (const OperandData &Data : Op0)
1770           UniqueValues.insert(Data.V);
1771         for (ArrayRef<OperandData> Op : drop_begin(OpsVec, 1)) {
1772           if (any_of(Op, [&UniqueValues](const OperandData &Data) {
1773                 return !UniqueValues.contains(Data.V);
1774               }))
1775             return false;
1776         }
1777         // TODO: Check if we can remove a check for non-power-2 number of
1778         // scalars after full support of non-power-2 vectorization.
1779         return UniqueValues.size() != 2 && isPowerOf2_32(UniqueValues.size());
1780       };
1781 
1782       // If the initial strategy fails for any of the operand indexes, then we
1783       // perform reordering again in a second pass. This helps avoid assigning
1784       // high priority to the failed strategy, and should improve reordering for
1785       // the non-failed operand indexes.
1786       for (int Pass = 0; Pass != 2; ++Pass) {
        // Check if there is no need to reorder the operands because they form
        // a perfect or shuffled diamond match.
        // We need to do this to avoid counting extra external use cost for
        // shuffled matches, which may cause regressions.
1791         if (SkipReordering())
1792           break;
1793         // Skip the second pass if the first pass did not fail.
1794         bool StrategyFailed = false;
1795         // Mark all operand data as free to use.
1796         clearUsed();
1797         // We keep the original operand order for the FirstLane, so reorder the
1798         // rest of the lanes. We are visiting the nodes in a circular fashion,
1799         // using FirstLane as the center point and increasing the radius
1800         // distance.
1801         SmallVector<SmallVector<Value *, 2>> MainAltOps(NumOperands);
1802         for (unsigned I = 0; I < NumOperands; ++I)
1803           MainAltOps[I].push_back(getData(I, FirstLane).V);
1804 
1805         for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
1806           // Visit the lane on the right and then the lane on the left.
1807           for (int Direction : {+1, -1}) {
1808             int Lane = FirstLane + Direction * Distance;
1809             if (Lane < 0 || Lane >= (int)NumLanes)
1810               continue;
1811             int LastLane = Lane - Direction;
1812             assert(LastLane >= 0 && LastLane < (int)NumLanes &&
1813                    "Out of bounds");
1814             // Look for a good match for each operand.
1815             for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1816               // Search for the operand that matches SortedOps[OpIdx][Lane-1].
1817               Optional<unsigned> BestIdx = getBestOperand(
1818                   OpIdx, Lane, LastLane, ReorderingModes, MainAltOps[OpIdx]);
1819               // By not selecting a value, we allow the operands that follow to
1820               // select a better matching value. We will get a non-null value in
1821               // the next run of getBestOperand().
1822               if (BestIdx) {
1823                 // Swap the current operand with the one returned by
1824                 // getBestOperand().
1825                 swap(OpIdx, BestIdx.getValue(), Lane);
1826               } else {
1827                 // We failed to find a best operand, set mode to 'Failed'.
1828                 ReorderingModes[OpIdx] = ReorderingMode::Failed;
1829                 // Enable the second pass.
1830                 StrategyFailed = true;
1831               }
1832               // Try to get the alternate opcode and follow it during analysis.
1833               if (MainAltOps[OpIdx].size() != 2) {
1834                 OperandData &AltOp = getData(OpIdx, Lane);
1835                 InstructionsState OpS =
1836                     getSameOpcode({MainAltOps[OpIdx].front(), AltOp.V});
1837                 if (OpS.getOpcode() && OpS.isAltShuffle())
1838                   MainAltOps[OpIdx].push_back(AltOp.V);
1839               }
1840             }
1841           }
1842         }
1843         // Skip second pass if the strategy did not fail.
1844         if (!StrategyFailed)
1845           break;
1846       }
1847     }
1848 
1849 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1850     LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
1851       switch (RMode) {
1852       case ReorderingMode::Load:
1853         return "Load";
1854       case ReorderingMode::Opcode:
1855         return "Opcode";
1856       case ReorderingMode::Constant:
1857         return "Constant";
1858       case ReorderingMode::Splat:
1859         return "Splat";
1860       case ReorderingMode::Failed:
1861         return "Failed";
1862       }
1863       llvm_unreachable("Unimplemented Reordering Type");
1864     }
1865 
1866     LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
1867                                                    raw_ostream &OS) {
1868       return OS << getModeStr(RMode);
1869     }
1870 
1871     /// Debug print.
1872     LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
1873       printMode(RMode, dbgs());
1874     }
1875 
1876     friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
1877       return printMode(RMode, OS);
1878     }
1879 
1880     LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
1881       const unsigned Indent = 2;
1882       unsigned Cnt = 0;
1883       for (const OperandDataVec &OpDataVec : OpsVec) {
1884         OS << "Operand " << Cnt++ << "\n";
1885         for (const OperandData &OpData : OpDataVec) {
1886           OS.indent(Indent) << "{";
1887           if (Value *V = OpData.V)
1888             OS << *V;
1889           else
1890             OS << "null";
1891           OS << ", APO:" << OpData.APO << "}\n";
1892         }
1893         OS << "\n";
1894       }
1895       return OS;
1896     }
1897 
1898     /// Debug print.
1899     LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
1900 #endif
1901   };
1902 
1903   /// Checks if the instruction is marked for deletion.
1904   bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); }
1905 
  /// Marks the operands of the given values for later deletion by replacing
  /// them with Undefs.
1907   void eraseInstructions(ArrayRef<Value *> AV);
1908 
1909   ~BoUpSLP();
1910 
1911 private:
  /// Check if the operands on the edges \p Edges of the \p UserTE allow
  /// reordering (i.e. the operands can be reordered because they have only one
  /// user and are reorderable).
  /// \param ReorderableGathers List of all gather nodes that require
  /// reordering (e.g., gather of extractelements or partially vectorizable
  /// loads).
  /// \param GatherOps List of gather operand nodes for \p UserTE that require
  /// reordering, subset of \p ReorderableGathers.
1919   bool
1920   canReorderOperands(TreeEntry *UserTE,
1921                      SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
1922                      ArrayRef<TreeEntry *> ReorderableGathers,
1923                      SmallVectorImpl<TreeEntry *> &GatherOps);
1924 
1925   /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
1926   /// if any. If it is not vectorized (gather node), returns nullptr.
1927   TreeEntry *getVectorizedOperand(TreeEntry *UserTE, unsigned OpIdx) {
1928     ArrayRef<Value *> VL = UserTE->getOperand(OpIdx);
1929     TreeEntry *TE = nullptr;
1930     const auto *It = find_if(VL, [this, &TE](Value *V) {
1931       TE = getTreeEntry(V);
1932       return TE;
1933     });
1934     if (It != VL.end() && TE->isSame(VL))
1935       return TE;
1936     return nullptr;
1937   }
1938 
1939   /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
1940   /// if any. If it is not vectorized (gather node), returns nullptr.
1941   const TreeEntry *getVectorizedOperand(const TreeEntry *UserTE,
1942                                         unsigned OpIdx) const {
1943     return const_cast<BoUpSLP *>(this)->getVectorizedOperand(
1944         const_cast<TreeEntry *>(UserTE), OpIdx);
1945   }
1946 
1947   /// Checks if all users of \p I are the part of the vectorization tree.
1948   bool areAllUsersVectorized(Instruction *I,
1949                              ArrayRef<Value *> VectorizedVals) const;
1950 
1951   /// \returns the cost of the vectorizable entry.
1952   InstructionCost getEntryCost(const TreeEntry *E,
1953                                ArrayRef<Value *> VectorizedVals);
1954 
1955   /// This is the recursive part of buildTree.
1956   void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
1957                      const EdgeInfo &EI);
1958 
1959   /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
1960   /// be vectorized to use the original vector (or aggregate "bitcast" to a
1961   /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
1962   /// returns false, setting \p CurrentOrder to either an empty vector or a
1963   /// non-identity permutation that allows to reuse extract instructions.
1964   bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
1965                        SmallVectorImpl<unsigned> &CurrentOrder) const;
1966 
1967   /// Vectorize a single entry in the tree.
1968   Value *vectorizeTree(TreeEntry *E);
1969 
1970   /// Vectorize a single entry in the tree, starting in \p VL.
1971   Value *vectorizeTree(ArrayRef<Value *> VL);
1972 
1973   /// Create a new vector from a list of scalar values.  Produces a sequence
1974   /// which exploits values reused across lanes, and arranges the inserts
1975   /// for ease of later optimization.
1976   Value *createBuildVector(ArrayRef<Value *> VL);
1977 
1978   /// \returns the scalarization cost for this type. Scalarization in this
1979   /// context means the creation of vectors from a group of scalars. If \p
1980   /// NeedToShuffle is true, need to add a cost of reshuffling some of the
1981   /// vector elements.
1982   InstructionCost getGatherCost(FixedVectorType *Ty,
1983                                 const APInt &ShuffledIndices,
1984                                 bool NeedToShuffle) const;
1985 
1986   /// Checks if the gathered \p VL can be represented as shuffle(s) of previous
1987   /// tree entries.
1988   /// \returns ShuffleKind, if gathered values can be represented as shuffles of
1989   /// previous tree entries. \p Mask is filled with the shuffle mask.
1990   Optional<TargetTransformInfo::ShuffleKind>
1991   isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
1992                         SmallVectorImpl<const TreeEntry *> &Entries);
1993 
1994   /// \returns the scalarization cost for this list of values. Assuming that
1995   /// this subtree gets vectorized, we may need to extract the values from the
1996   /// roots. This method calculates the cost of extracting the values.
1997   InstructionCost getGatherCost(ArrayRef<Value *> VL) const;
1998 
1999   /// Set the Builder insert point to one after the last instruction in
2000   /// the bundle
2001   void setInsertPointAfterBundle(const TreeEntry *E);
2002 
2003   /// \returns a vector from a collection of scalars in \p VL.
2004   Value *gather(ArrayRef<Value *> VL);
2005 
2006   /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even if the tree height is tiny.
2008   bool isFullyVectorizableTinyTree(bool ForReduction) const;
2009 
2010   /// Reorder commutative or alt operands to get better probability of
2011   /// generating vectorized code.
2012   static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
2013                                              SmallVectorImpl<Value *> &Left,
2014                                              SmallVectorImpl<Value *> &Right,
2015                                              const DataLayout &DL,
2016                                              ScalarEvolution &SE,
2017                                              const BoUpSLP &R);
2018   struct TreeEntry {
2019     using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
2020     TreeEntry(VecTreeTy &Container) : Container(Container) {}
2021 
2022     /// \returns true if the scalars in VL are equal to this entry.
2023     bool isSame(ArrayRef<Value *> VL) const {
2024       auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) {
2025         if (Mask.size() != VL.size() && VL.size() == Scalars.size())
2026           return std::equal(VL.begin(), VL.end(), Scalars.begin());
2027         return VL.size() == Mask.size() &&
2028                std::equal(VL.begin(), VL.end(), Mask.begin(),
2029                           [Scalars](Value *V, int Idx) {
2030                             return (isa<UndefValue>(V) &&
2031                                     Idx == UndefMaskElem) ||
2032                                    (Idx != UndefMaskElem && V == Scalars[Idx]);
2033                           });
2034       };
2035       if (!ReorderIndices.empty()) {
2036         // TODO: implement matching if the nodes are just reordered, still can
2037         // treat the vector as the same if the list of scalars matches VL
2038         // directly, without reordering.
2039         SmallVector<int> Mask;
2040         inversePermutation(ReorderIndices, Mask);
2041         if (VL.size() == Scalars.size())
2042           return IsSame(Scalars, Mask);
2043         if (VL.size() == ReuseShuffleIndices.size()) {
2044           ::addMask(Mask, ReuseShuffleIndices);
2045           return IsSame(Scalars, Mask);
2046         }
2047         return false;
2048       }
2049       return IsSame(Scalars, ReuseShuffleIndices);
2050     }
2051 
2052     /// \returns true if current entry has same operands as \p TE.
2053     bool hasEqualOperands(const TreeEntry &TE) const {
2054       if (TE.getNumOperands() != getNumOperands())
2055         return false;
2056       SmallBitVector Used(getNumOperands());
2057       for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
2058         unsigned PrevCount = Used.count();
2059         for (unsigned K = 0; K < E; ++K) {
2060           if (Used.test(K))
2061             continue;
2062           if (getOperand(K) == TE.getOperand(I)) {
2063             Used.set(K);
2064             break;
2065           }
2066         }
2067         // Check if we actually found the matching operand.
2068         if (PrevCount == Used.count())
2069           return false;
2070       }
2071       return true;
2072     }
2073 
2074     /// \return Final vectorization factor for the node. Defined by the total
    /// number of vectorized scalars, including those used several times in the
2076     /// entry and counted in the \a ReuseShuffleIndices, if any.
2077     unsigned getVectorFactor() const {
2078       if (!ReuseShuffleIndices.empty())
2079         return ReuseShuffleIndices.size();
2080       return Scalars.size();
2081     };
2082 
2083     /// A vector of scalars.
2084     ValueList Scalars;
2085 
2086     /// The Scalars are vectorized into this value. It is initialized to Null.
2087     Value *VectorizedValue = nullptr;
2088 
2089     /// Do we need to gather this sequence or vectorize it
2090     /// (either with vector instruction or with scatter/gather
2091     /// intrinsics for store/load)?
2092     enum EntryState { Vectorize, ScatterVectorize, NeedToGather };
2093     EntryState State;
2094 
2095     /// Does this sequence require some shuffling?
2096     SmallVector<int, 4> ReuseShuffleIndices;
2097 
2098     /// Does this entry require reordering?
2099     SmallVector<unsigned, 4> ReorderIndices;
2100 
2101     /// Points back to the VectorizableTree.
2102     ///
2103     /// Only used for Graphviz right now.  Unfortunately GraphTrait::NodeRef has
2104     /// to be a pointer and needs to be able to initialize the child iterator.
2105     /// Thus we need a reference back to the container to translate the indices
2106     /// to entries.
2107     VecTreeTy &Container;
2108 
2109     /// The TreeEntry index containing the user of this entry.  We can actually
2110     /// have multiple users so the data structure is not truly a tree.
2111     SmallVector<EdgeInfo, 1> UserTreeIndices;
2112 
2113     /// The index of this treeEntry in VectorizableTree.
2114     int Idx = -1;
2115 
2116   private:
2117     /// The operands of each instruction in each lane Operands[op_index][lane].
2118     /// Note: This helps avoid the replication of the code that performs the
2119     /// reordering of operands during buildTree_rec() and vectorizeTree().
2120     SmallVector<ValueList, 2> Operands;
2121 
2122     /// The main/alternate instruction.
2123     Instruction *MainOp = nullptr;
2124     Instruction *AltOp = nullptr;
2125 
2126   public:
2127     /// Set this bundle's \p OpIdx'th operand to \p OpVL.
2128     void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) {
2129       if (Operands.size() < OpIdx + 1)
2130         Operands.resize(OpIdx + 1);
2131       assert(Operands[OpIdx].empty() && "Already resized?");
2132       assert(OpVL.size() <= Scalars.size() &&
2133              "Number of operands is greater than the number of scalars.");
2134       Operands[OpIdx].resize(OpVL.size());
2135       copy(OpVL, Operands[OpIdx].begin());
2136     }
2137 
2138     /// Set the operands of this bundle in their original order.
2139     void setOperandsInOrder() {
2140       assert(Operands.empty() && "Already initialized?");
2141       auto *I0 = cast<Instruction>(Scalars[0]);
2142       Operands.resize(I0->getNumOperands());
2143       unsigned NumLanes = Scalars.size();
2144       for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands();
2145            OpIdx != NumOperands; ++OpIdx) {
2146         Operands[OpIdx].resize(NumLanes);
2147         for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
2148           auto *I = cast<Instruction>(Scalars[Lane]);
2149           assert(I->getNumOperands() == NumOperands &&
2150                  "Expected same number of operands");
2151           Operands[OpIdx][Lane] = I->getOperand(OpIdx);
2152         }
2153       }
2154     }
2155 
2156     /// Reorders operands of the node to the given mask \p Mask.
2157     void reorderOperands(ArrayRef<int> Mask) {
2158       for (ValueList &Operand : Operands)
2159         reorderScalars(Operand, Mask);
2160     }
2161 
2162     /// \returns the \p OpIdx operand of this TreeEntry.
2163     ValueList &getOperand(unsigned OpIdx) {
2164       assert(OpIdx < Operands.size() && "Off bounds");
2165       return Operands[OpIdx];
2166     }
2167 
2168     /// \returns the \p OpIdx operand of this TreeEntry.
2169     ArrayRef<Value *> getOperand(unsigned OpIdx) const {
2170       assert(OpIdx < Operands.size() && "Off bounds");
2171       return Operands[OpIdx];
2172     }
2173 
2174     /// \returns the number of operands.
2175     unsigned getNumOperands() const { return Operands.size(); }
2176 
2177     /// \return the single \p OpIdx operand.
2178     Value *getSingleOperand(unsigned OpIdx) const {
2179       assert(OpIdx < Operands.size() && "Off bounds");
2180       assert(!Operands[OpIdx].empty() && "No operand available");
2181       return Operands[OpIdx][0];
2182     }
2183 
2184     /// Some of the instructions in the list have alternate opcodes.
2185     bool isAltShuffle() const { return MainOp != AltOp; }
2186 
2187     bool isOpcodeOrAlt(Instruction *I) const {
2188       unsigned CheckedOpcode = I->getOpcode();
2189       return (getOpcode() == CheckedOpcode ||
2190               getAltOpcode() == CheckedOpcode);
2191     }
2192 
2193     /// Chooses the correct key for scheduling data. If \p Op has the same (or
2194     /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is
2195     /// \p OpValue.
2196     Value *isOneOf(Value *Op) const {
2197       auto *I = dyn_cast<Instruction>(Op);
2198       if (I && isOpcodeOrAlt(I))
2199         return Op;
2200       return MainOp;
2201     }
2202 
2203     void setOperations(const InstructionsState &S) {
2204       MainOp = S.MainOp;
2205       AltOp = S.AltOp;
2206     }
2207 
2208     Instruction *getMainOp() const {
2209       return MainOp;
2210     }
2211 
2212     Instruction *getAltOp() const {
2213       return AltOp;
2214     }
2215 
2216     /// The main/alternate opcodes for the list of instructions.
2217     unsigned getOpcode() const {
2218       return MainOp ? MainOp->getOpcode() : 0;
2219     }
2220 
2221     unsigned getAltOpcode() const {
2222       return AltOp ? AltOp->getOpcode() : 0;
2223     }
2224 
    /// When ReuseShuffleIndices is empty it just returns the position of \p V
    /// within the vector of Scalars. Otherwise, tries to remap it via its
    /// reuse index.
2227     int findLaneForValue(Value *V) const {
2228       unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V));
2229       assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
2230       if (!ReorderIndices.empty())
2231         FoundLane = ReorderIndices[FoundLane];
2232       assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
2233       if (!ReuseShuffleIndices.empty()) {
2234         FoundLane = std::distance(ReuseShuffleIndices.begin(),
2235                                   find(ReuseShuffleIndices, FoundLane));
2236       }
2237       return FoundLane;
2238     }
2239 
2240 #ifndef NDEBUG
2241     /// Debug printer.
2242     LLVM_DUMP_METHOD void dump() const {
2243       dbgs() << Idx << ".\n";
2244       for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) {
2245         dbgs() << "Operand " << OpI << ":\n";
2246         for (const Value *V : Operands[OpI])
2247           dbgs().indent(2) << *V << "\n";
2248       }
2249       dbgs() << "Scalars: \n";
2250       for (Value *V : Scalars)
2251         dbgs().indent(2) << *V << "\n";
2252       dbgs() << "State: ";
2253       switch (State) {
2254       case Vectorize:
2255         dbgs() << "Vectorize\n";
2256         break;
2257       case ScatterVectorize:
2258         dbgs() << "ScatterVectorize\n";
2259         break;
2260       case NeedToGather:
2261         dbgs() << "NeedToGather\n";
2262         break;
2263       }
2264       dbgs() << "MainOp: ";
2265       if (MainOp)
2266         dbgs() << *MainOp << "\n";
2267       else
2268         dbgs() << "NULL\n";
2269       dbgs() << "AltOp: ";
2270       if (AltOp)
2271         dbgs() << *AltOp << "\n";
2272       else
2273         dbgs() << "NULL\n";
2274       dbgs() << "VectorizedValue: ";
2275       if (VectorizedValue)
2276         dbgs() << *VectorizedValue << "\n";
2277       else
2278         dbgs() << "NULL\n";
2279       dbgs() << "ReuseShuffleIndices: ";
2280       if (ReuseShuffleIndices.empty())
2281         dbgs() << "Empty";
2282       else
2283         for (int ReuseIdx : ReuseShuffleIndices)
2284           dbgs() << ReuseIdx << ", ";
2285       dbgs() << "\n";
2286       dbgs() << "ReorderIndices: ";
2287       for (unsigned ReorderIdx : ReorderIndices)
2288         dbgs() << ReorderIdx << ", ";
2289       dbgs() << "\n";
2290       dbgs() << "UserTreeIndices: ";
2291       for (const auto &EInfo : UserTreeIndices)
2292         dbgs() << EInfo << ", ";
2293       dbgs() << "\n";
2294     }
2295 #endif
2296   };
2297 
2298 #ifndef NDEBUG
2299   void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost,
2300                      InstructionCost VecCost,
2301                      InstructionCost ScalarCost) const {
2302     dbgs() << "SLP: Calculated costs for Tree:\n"; E->dump();
2303     dbgs() << "SLP: Costs:\n";
2304     dbgs() << "SLP:     ReuseShuffleCost = " << ReuseShuffleCost << "\n";
2305     dbgs() << "SLP:     VectorCost = " << VecCost << "\n";
2306     dbgs() << "SLP:     ScalarCost = " << ScalarCost << "\n";
2307     dbgs() << "SLP:     ReuseShuffleCost + VecCost - ScalarCost = " <<
2308                ReuseShuffleCost + VecCost - ScalarCost << "\n";
2309   }
2310 #endif
2311 
2312   /// Create a new VectorizableTree entry.
2313   TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle,
2314                           const InstructionsState &S,
2315                           const EdgeInfo &UserTreeIdx,
2316                           ArrayRef<int> ReuseShuffleIndices = None,
2317                           ArrayRef<unsigned> ReorderIndices = None) {
2318     TreeEntry::EntryState EntryState =
2319         Bundle ? TreeEntry::Vectorize : TreeEntry::NeedToGather;
2320     return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx,
2321                         ReuseShuffleIndices, ReorderIndices);
2322   }
2323 
2324   TreeEntry *newTreeEntry(ArrayRef<Value *> VL,
2325                           TreeEntry::EntryState EntryState,
2326                           Optional<ScheduleData *> Bundle,
2327                           const InstructionsState &S,
2328                           const EdgeInfo &UserTreeIdx,
2329                           ArrayRef<int> ReuseShuffleIndices = None,
2330                           ArrayRef<unsigned> ReorderIndices = None) {
2331     assert(((!Bundle && EntryState == TreeEntry::NeedToGather) ||
2332             (Bundle && EntryState != TreeEntry::NeedToGather)) &&
2333            "Need to vectorize gather entry?");
2334     VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree));
2335     TreeEntry *Last = VectorizableTree.back().get();
2336     Last->Idx = VectorizableTree.size() - 1;
2337     Last->State = EntryState;
2338     Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(),
2339                                      ReuseShuffleIndices.end());
2340     if (ReorderIndices.empty()) {
2341       Last->Scalars.assign(VL.begin(), VL.end());
2342       Last->setOperations(S);
2343     } else {
2344       // Reorder scalars and build final mask.
2345       Last->Scalars.assign(VL.size(), nullptr);
2346       transform(ReorderIndices, Last->Scalars.begin(),
2347                 [VL](unsigned Idx) -> Value * {
2348                   if (Idx >= VL.size())
2349                     return UndefValue::get(VL.front()->getType());
2350                   return VL[Idx];
2351                 });
2352       InstructionsState S = getSameOpcode(Last->Scalars);
2353       Last->setOperations(S);
2354       Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end());
2355     }
2356     if (Last->State != TreeEntry::NeedToGather) {
2357       for (Value *V : VL) {
2358         assert(!getTreeEntry(V) && "Scalar already in tree!");
2359         ScalarToTreeEntry[V] = Last;
2360       }
2361       // Update the scheduler bundle to point to this TreeEntry.
2362       unsigned Lane = 0;
2363       for (ScheduleData *BundleMember = Bundle.getValue(); BundleMember;
2364            BundleMember = BundleMember->NextInBundle) {
2365         BundleMember->TE = Last;
2366         BundleMember->Lane = Lane;
2367         ++Lane;
2368       }
2369       assert((!Bundle.getValue() || Lane == VL.size()) &&
2370              "Bundle and VL out of sync");
2371     } else {
2372       MustGather.insert(VL.begin(), VL.end());
2373     }
2374 
2375     if (UserTreeIdx.UserTE)
2376       Last->UserTreeIndices.push_back(UserTreeIdx);
2377 
2378     return Last;
2379   }
2380 
2381   /// -- Vectorization State --
2382   /// Holds all of the tree entries.
2383   TreeEntry::VecTreeTy VectorizableTree;
2384 
2385 #ifndef NDEBUG
2386   /// Debug printer.
2387   LLVM_DUMP_METHOD void dumpVectorizableTree() const {
2388     for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) {
2389       VectorizableTree[Id]->dump();
2390       dbgs() << "\n";
2391     }
2392   }
2393 #endif
2394 
2395   TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); }
2396 
2397   const TreeEntry *getTreeEntry(Value *V) const {
2398     return ScalarToTreeEntry.lookup(V);
2399   }
2400 
2401   /// Maps a specific scalar to its tree entry.
2402   SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry;
2403 
2404   /// Maps a value to the proposed vectorizable size.
2405   SmallDenseMap<Value *, unsigned> InstrElementSize;
2406 
2407   /// A list of scalars that we found that we need to keep as scalars.
2408   ValueSet MustGather;
2409 
2410   /// This POD struct describes one external user in the vectorized tree.
2411   struct ExternalUser {
2412     ExternalUser(Value *S, llvm::User *U, int L)
2413         : Scalar(S), User(U), Lane(L) {}
2414 
2415     // Which scalar in our function.
2416     Value *Scalar;
2417 
2418     // Which user that uses the scalar.
2419     llvm::User *User;
2420 
2421     // Which lane does the scalar belong to.
2422     int Lane;
2423   };
2424   using UserList = SmallVector<ExternalUser, 16>;
2425 
2426   /// Checks if two instructions may access the same memory.
2427   ///
2428   /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
2429   /// is invariant in the calling loop.
2430   bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
2431                  Instruction *Inst2) {
    // First check if the result is already in the cache.
    AliasCacheKey Key = std::make_pair(Inst1, Inst2);
    Optional<bool> &Result = AliasCache[Key];
    if (Result.hasValue())
      return Result.getValue();
    bool Aliased = true;
    if (Loc1.Ptr && isSimple(Inst1))
      Aliased = isModOrRefSet(BatchAA.getModRefInfo(Inst2, Loc1));
    // Store the result in the cache.
    Result = Aliased;
    return Aliased;
2444   }
2445 
2446   using AliasCacheKey = std::pair<Instruction *, Instruction *>;
2447 
2448   /// Cache for alias results.
2449   /// TODO: consider moving this to the AliasAnalysis itself.
2450   DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
2451 
  /// Cache for pointerMayBeCaptured calls inside AA. This is preserved
  /// globally through SLP because we don't perform any action which
  /// invalidates capture results.
2455   BatchAAResults BatchAA;
2456 
2457   /// Removes an instruction from its block and eventually deletes it.
2458   /// It's like Instruction::eraseFromParent() except that the actual deletion
2459   /// is delayed until BoUpSLP is destructed.
2460   /// This is required to ensure that there are no incorrect collisions in the
2461   /// AliasCache, which can happen if a new instruction is allocated at the
2462   /// same address as a previously deleted instruction.
2463   void eraseInstruction(Instruction *I, bool ReplaceOpsWithUndef = false) {
2464     auto It = DeletedInstructions.try_emplace(I, ReplaceOpsWithUndef).first;
2465     It->getSecond() = It->getSecond() && ReplaceOpsWithUndef;
2466   }
2467 
2468   /// Temporary store for deleted instructions. Instructions will be deleted
2469   /// eventually when the BoUpSLP is destructed.
2470   DenseMap<Instruction *, bool> DeletedInstructions;
2471 
  /// A list of values that need to be extracted out of the tree.
2473   /// This list holds pairs of (Internal Scalar : External User). External User
2474   /// can be nullptr, it means that this Internal Scalar will be used later,
2475   /// after vectorization.
2476   UserList ExternalUses;
2477 
2478   /// Values used only by @llvm.assume calls.
2479   SmallPtrSet<const Value *, 32> EphValues;
2480 
2481   /// Holds all of the instructions that we gathered.
2482   SetVector<Instruction *> GatherShuffleSeq;
2483 
2484   /// A list of blocks that we are going to CSE.
2485   SetVector<BasicBlock *> CSEBlocks;
2486 
2487   /// Contains all scheduling relevant data for an instruction.
2488   /// A ScheduleData either represents a single instruction or a member of an
2489   /// instruction bundle (= a group of instructions which is combined into a
2490   /// vector instruction).
2491   struct ScheduleData {
2492     // The initial value for the dependency counters. It means that the
2493     // dependencies are not calculated yet.
2494     enum { InvalidDeps = -1 };
2495 
2496     ScheduleData() = default;
2497 
2498     void init(int BlockSchedulingRegionID, Value *OpVal) {
2499       FirstInBundle = this;
2500       NextInBundle = nullptr;
2501       NextLoadStore = nullptr;
2502       IsScheduled = false;
2503       SchedulingRegionID = BlockSchedulingRegionID;
2504       clearDependencies();
2505       OpValue = OpVal;
2506       TE = nullptr;
2507       Lane = -1;
2508     }
2509 
    /// Verify basic self-consistency properties.
2511     void verify() {
2512       if (hasValidDependencies()) {
2513         assert(UnscheduledDeps <= Dependencies && "invariant");
2514       } else {
2515         assert(UnscheduledDeps == Dependencies && "invariant");
2516       }
2517 
2518       if (IsScheduled) {
2519         assert(isSchedulingEntity() &&
2520                 "unexpected scheduled state");
2521         for (const ScheduleData *BundleMember = this; BundleMember;
2522              BundleMember = BundleMember->NextInBundle) {
2523           assert(BundleMember->hasValidDependencies() &&
2524                  BundleMember->UnscheduledDeps == 0 &&
2525                  "unexpected scheduled state");
2526           assert((BundleMember == this || !BundleMember->IsScheduled) &&
2527                  "only bundle is marked scheduled");
2528         }
2529       }
2530 
2531       assert(Inst->getParent() == FirstInBundle->Inst->getParent() &&
2532              "all bundle members must be in same basic block");
2533     }
2534 
2535     /// Returns true if the dependency information has been calculated.
    /// Note that dependency validity can vary between instructions within
2537     /// a single bundle.
2538     bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
2539 
2540     /// Returns true for single instructions and for bundle representatives
2541     /// (= the head of a bundle).
2542     bool isSchedulingEntity() const { return FirstInBundle == this; }
2543 
2544     /// Returns true if it represents an instruction bundle and not only a
2545     /// single instruction.
2546     bool isPartOfBundle() const {
2547       return NextInBundle != nullptr || FirstInBundle != this;
2548     }
2549 
    /// Returns true if it is ready for scheduling, i.e. it has no remaining
    /// unscheduled dependencies on other instructions/bundles.
2552     bool isReady() const {
2553       assert(isSchedulingEntity() &&
2554              "can't consider non-scheduling entity for ready list");
2555       return unscheduledDepsInBundle() == 0 && !IsScheduled;
2556     }
2557 
2558     /// Modifies the number of unscheduled dependencies for this instruction,
2559     /// and returns the number of remaining dependencies for the containing
2560     /// bundle.
2561     int incrementUnscheduledDeps(int Incr) {
2562       assert(hasValidDependencies() &&
2563              "increment of unscheduled deps would be meaningless");
2564       UnscheduledDeps += Incr;
2565       return FirstInBundle->unscheduledDepsInBundle();
2566     }
2567 
2568     /// Sets the number of unscheduled dependencies to the number of
2569     /// dependencies.
2570     void resetUnscheduledDeps() {
2571       UnscheduledDeps = Dependencies;
2572     }
2573 
2574     /// Clears all dependency information.
2575     void clearDependencies() {
2576       Dependencies = InvalidDeps;
2577       resetUnscheduledDeps();
2578       MemoryDependencies.clear();
2579     }
2580 
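    /// Returns the sum of UnscheduledDeps over all members of the bundle, or
    /// InvalidDeps if any member's dependencies are not calculated yet.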
2581     int unscheduledDepsInBundle() const {
2582       assert(isSchedulingEntity() && "only meaningful on the bundle");
2583       int Sum = 0;
2584       for (const ScheduleData *BundleMember = this; BundleMember;
2585            BundleMember = BundleMember->NextInBundle) {
2586         if (BundleMember->UnscheduledDeps == InvalidDeps)
2587           return InvalidDeps;
2588         Sum += BundleMember->UnscheduledDeps;
2589       }
2590       return Sum;
2591     }
2592 
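    /// Prints this node. A bundle is printed as [head;member;...]; a non-head
    /// bundle member is prefixed with "/ ".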
2593     void dump(raw_ostream &os) const {
2594       if (!isSchedulingEntity()) {
2595         os << "/ " << *Inst;
2596       } else if (NextInBundle) {
2597         os << '[' << *Inst;
2598         ScheduleData *SD = NextInBundle;
2599         while (SD) {
2600           os << ';' << *SD->Inst;
2601           SD = SD->NextInBundle;
2602         }
2603         os << ']';
2604       } else {
2605         os << *Inst;
2606       }
2607     }
2608 
2609     Instruction *Inst = nullptr;
2610 
    /// The value this ScheduleData is keyed on. It equals Inst for the primary
    /// schedule data of an instruction and differs for extra schedule data.
2612     Value *OpValue = nullptr;
2613 
2614     /// The TreeEntry that this instruction corresponds to.
2615     TreeEntry *TE = nullptr;
2616 
2617     /// Points to the head in an instruction bundle (and always to this for
2618     /// single instructions).
2619     ScheduleData *FirstInBundle = nullptr;
2620 
2621     /// Single linked list of all instructions in a bundle. Null if it is a
2622     /// single instruction.
2623     ScheduleData *NextInBundle = nullptr;
2624 
2625     /// Single linked list of all memory instructions (e.g. load, store, call)
2626     /// in the block - until the end of the scheduling region.
2627     ScheduleData *NextLoadStore = nullptr;
2628 
2629     /// The dependent memory instructions.
2630     /// This list is derived on demand in calculateDependencies().
2631     SmallVector<ScheduleData *, 4> MemoryDependencies;
2632 
2633     /// This ScheduleData is in the current scheduling region if this matches
2634     /// the current SchedulingRegionID of BlockScheduling.
2635     int SchedulingRegionID = 0;
2636 
2637     /// Used for getting a "good" final ordering of instructions.
2638     int SchedulingPriority = 0;
2639 
    /// The number of dependencies: the number of users of the instruction
    /// plus the number of dependent memory instructions (if any).
2642     /// This value is calculated on demand.
2643     /// If InvalidDeps, the number of dependencies is not calculated yet.
2644     int Dependencies = InvalidDeps;
2645 
2646     /// The number of dependencies minus the number of dependencies of scheduled
2647     /// instructions. As soon as this is zero, the instruction/bundle gets ready
2648     /// for scheduling.
2649     /// Note that this is negative as long as Dependencies is not calculated.
2650     int UnscheduledDeps = InvalidDeps;
2651 
2652     /// The lane of this node in the TreeEntry.
2653     int Lane = -1;
2654 
2655     /// True if this instruction is scheduled (or considered as scheduled in the
2656     /// dry-run).
2657     bool IsScheduled = false;
2658   };
2659 
2660 #ifndef NDEBUG
2661   friend inline raw_ostream &operator<<(raw_ostream &os,
2662                                         const BoUpSLP::ScheduleData &SD) {
2663     SD.dump(os);
2664     return os;
2665   }
2666 #endif
2667 
2668   friend struct GraphTraits<BoUpSLP *>;
2669   friend struct DOTGraphTraits<BoUpSLP *>;
2670 
2671   /// Contains all scheduling data for a basic block.
2672   struct BlockScheduling {
2673     BlockScheduling(BasicBlock *BB)
2674         : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}
2675 
2676     void clear() {
2677       ReadyInsts.clear();
2678       ScheduleStart = nullptr;
2679       ScheduleEnd = nullptr;
2680       FirstLoadStoreInRegion = nullptr;
2681       LastLoadStoreInRegion = nullptr;
2682 
2683       // Reduce the maximum schedule region size by the size of the
2684       // previous scheduling run.
2685       ScheduleRegionSizeLimit -= ScheduleRegionSize;
2686       if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
2687         ScheduleRegionSizeLimit = MinScheduleRegionSize;
2688       ScheduleRegionSize = 0;
2689 
2690       // Make a new scheduling region, i.e. all existing ScheduleData is not
2691       // in the new region yet.
2692       ++SchedulingRegionID;
2693     }
2694 
2695     ScheduleData *getScheduleData(Instruction *I) {
2696       if (BB != I->getParent())
        // Avoid the lookup if I cannot possibly be in the map.
2698         return nullptr;
2699       ScheduleData *SD = ScheduleDataMap[I];
2700       if (SD && isInSchedulingRegion(SD))
2701         return SD;
2702       return nullptr;
2703     }
2704 
2705     ScheduleData *getScheduleData(Value *V) {
2706       if (auto *I = dyn_cast<Instruction>(V))
2707         return getScheduleData(I);
2708       return nullptr;
2709     }
2710 
2711     ScheduleData *getScheduleData(Value *V, Value *Key) {
2712       if (V == Key)
2713         return getScheduleData(V);
2714       auto I = ExtraScheduleDataMap.find(V);
2715       if (I != ExtraScheduleDataMap.end()) {
2716         ScheduleData *SD = I->second[Key];
2717         if (SD && isInSchedulingRegion(SD))
2718           return SD;
2719       }
2720       return nullptr;
2721     }
2722 
2723     bool isInSchedulingRegion(ScheduleData *SD) const {
2724       return SD->SchedulingRegionID == SchedulingRegionID;
2725     }
2726 
2727     /// Marks an instruction as scheduled and puts all dependent ready
2728     /// instructions into the ready-list.
2729     template <typename ReadyListType>
2730     void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
2731       SD->IsScheduled = true;
2732       LLVM_DEBUG(dbgs() << "SLP:   schedule " << *SD << "\n");
2733 
2734       for (ScheduleData *BundleMember = SD; BundleMember;
2735            BundleMember = BundleMember->NextInBundle) {
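        // Extra schedule data (where OpValue != Inst) is only a placeholder
        // for an alternate key value; dependencies are handled on the primary
        // entry.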
2736         if (BundleMember->Inst != BundleMember->OpValue)
2737           continue;
2738 
2739         // Handle the def-use chain dependencies.
2740 
2741         // Decrement the unscheduled counter and insert to ready list if ready.
2742         auto &&DecrUnsched = [this, &ReadyList](Instruction *I) {
2743           doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
2744             if (OpDef && OpDef->hasValidDependencies() &&
2745                 OpDef->incrementUnscheduledDeps(-1) == 0) {
2746               // There are no more unscheduled dependencies after
2747               // decrementing, so we can put the dependent instruction
2748               // into the ready list.
2749               ScheduleData *DepBundle = OpDef->FirstInBundle;
2750               assert(!DepBundle->IsScheduled &&
2751                      "already scheduled bundle gets ready");
2752               ReadyList.insert(DepBundle);
2753               LLVM_DEBUG(dbgs()
2754                          << "SLP:    gets ready (def): " << *DepBundle << "\n");
2755             }
2756           });
2757         };
2758 
2759         // If BundleMember is a vector bundle, its operands may have been
2760         // reordered during buildTree(). We therefore need to get its operands
2761         // through the TreeEntry.
2762         if (TreeEntry *TE = BundleMember->TE) {
2763           int Lane = BundleMember->Lane;
2764           assert(Lane >= 0 && "Lane not set");
2765 
          // Since the vectorization tree is built recursively, this assertion
          // ensures that the tree entry has all of its operands set before
          // reaching this code. A couple of exceptions are known at the
          // moment: extracts, where the second (immediate) operand is not
          // added. Since immediates do not affect scheduler behavior, this is
          // considered okay.
2772           auto *In = TE->getMainOp();
2773           assert(In &&
2774                  (isa<ExtractValueInst>(In) || isa<ExtractElementInst>(In) ||
2775                   In->getNumOperands() == TE->getNumOperands()) &&
2776                  "Missed TreeEntry operands?");
2777           (void)In; // fake use to avoid build failure when assertions disabled
2778 
2779           for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands();
2780                OpIdx != NumOperands; ++OpIdx)
2781             if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane]))
2782               DecrUnsched(I);
2783         } else {
2784           // If BundleMember is a stand-alone instruction, no operand reordering
2785           // has taken place, so we directly access its operands.
2786           for (Use &U : BundleMember->Inst->operands())
2787             if (auto *I = dyn_cast<Instruction>(U.get()))
2788               DecrUnsched(I);
2789         }
2790         // Handle the memory dependencies.
2791         for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
2792           if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
2793             // There are no more unscheduled dependencies after decrementing,
2794             // so we can put the dependent instruction into the ready list.
2795             ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
2796             assert(!DepBundle->IsScheduled &&
2797                    "already scheduled bundle gets ready");
2798             ReadyList.insert(DepBundle);
2799             LLVM_DEBUG(dbgs()
2800                        << "SLP:    gets ready (mem): " << *DepBundle << "\n");
2801           }
2802         }
2803       }
2804     }
2805 
    /// Verify basic self-consistency properties of the data structure.
2807     void verify() {
2808       if (!ScheduleStart)
2809         return;
2810 
2811       assert(ScheduleStart->getParent() == ScheduleEnd->getParent() &&
2812              ScheduleStart->comesBefore(ScheduleEnd) &&
2813              "Not a valid scheduling region?");
2814 
2815       for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
2816         auto *SD = getScheduleData(I);
2817         assert(SD && "primary scheduledata must exist in window");
2818         assert(isInSchedulingRegion(SD) &&
2819                "primary schedule data not in window?");
2820         assert(isInSchedulingRegion(SD->FirstInBundle) &&
               "entire bundle must be in window!");
2822         (void)SD;
2823         doForAllOpcodes(I, [](ScheduleData *SD) { SD->verify(); });
2824       }
2825 
2826       for (auto *SD : ReadyInsts) {
2827         assert(SD->isSchedulingEntity() && SD->isReady() &&
2828                "item in ready list not ready?");
2829         (void)SD;
2830       }
2831     }
2832 
2833     void doForAllOpcodes(Value *V,
2834                          function_ref<void(ScheduleData *SD)> Action) {
2835       if (ScheduleData *SD = getScheduleData(V))
2836         Action(SD);
2837       auto I = ExtraScheduleDataMap.find(V);
2838       if (I != ExtraScheduleDataMap.end())
2839         for (auto &P : I->second)
2840           if (isInSchedulingRegion(P.second))
2841             Action(P.second);
2842     }
2843 
    /// Puts all instructions that are ready for scheduling into the
    /// ReadyList.
2845     template <typename ReadyListType>
2846     void initialFillReadyList(ReadyListType &ReadyList) {
2847       for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
2848         doForAllOpcodes(I, [&](ScheduleData *SD) {
2849           if (SD->isSchedulingEntity() && SD->isReady()) {
2850             ReadyList.insert(SD);
2851             LLVM_DEBUG(dbgs()
2852                        << "SLP:    initially in ready list: " << *SD << "\n");
2853           }
2854         });
2855       }
2856     }
2857 
2858     /// Build a bundle from the ScheduleData nodes corresponding to the
2859     /// scalar instruction for each lane.
2860     ScheduleData *buildBundle(ArrayRef<Value *> VL);
2861 
2862     /// Checks if a bundle of instructions can be scheduled, i.e. has no
2863     /// cyclic dependencies. This is only a dry-run, no instructions are
2864     /// actually moved at this stage.
2865     /// \returns the scheduling bundle. The returned Optional value is non-None
2866     /// if \p VL is allowed to be scheduled.
2867     Optional<ScheduleData *>
2868     tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
2869                       const InstructionsState &S);
2870 
2871     /// Un-bundles a group of instructions.
2872     void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);
2873 
2874     /// Allocates schedule data chunk.
2875     ScheduleData *allocateScheduleDataChunks();
2876 
2877     /// Extends the scheduling region so that V is inside the region.
2878     /// \returns true if the region size is within the limit.
2879     bool extendSchedulingRegion(Value *V, const InstructionsState &S);
2880 
2881     /// Initialize the ScheduleData structures for new instructions in the
2882     /// scheduling region.
2883     void initScheduleData(Instruction *FromI, Instruction *ToI,
2884                           ScheduleData *PrevLoadStore,
2885                           ScheduleData *NextLoadStore);
2886 
2887     /// Updates the dependency information of a bundle and of all instructions/
2888     /// bundles which depend on the original bundle.
2889     void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
2890                                BoUpSLP *SLP);
2891 
    /// Sets all instructions in the scheduling region to un-scheduled.
2893     void resetSchedule();
2894 
2895     BasicBlock *BB;
2896 
2897     /// Simple memory allocation for ScheduleData.
2898     std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
2899 
2900     /// The size of a ScheduleData array in ScheduleDataChunks.
2901     int ChunkSize;
2902 
2903     /// The allocator position in the current chunk, which is the last entry
2904     /// of ScheduleDataChunks.
2905     int ChunkPos;
2906 
2907     /// Attaches ScheduleData to Instruction.
2908     /// Note that the mapping survives during all vectorization iterations, i.e.
2909     /// ScheduleData structures are recycled.
2910     DenseMap<Instruction *, ScheduleData *> ScheduleDataMap;
2911 
2912     /// Attaches ScheduleData to Instruction with the leading key.
2913     DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>>
2914         ExtraScheduleDataMap;
2915 
2916     /// The ready-list for scheduling (only used for the dry-run).
2917     SetVector<ScheduleData *> ReadyInsts;
2918 
2919     /// The first instruction of the scheduling region.
2920     Instruction *ScheduleStart = nullptr;
2921 
2922     /// The first instruction _after_ the scheduling region.
2923     Instruction *ScheduleEnd = nullptr;
2924 
2925     /// The first memory accessing instruction in the scheduling region
2926     /// (can be null).
2927     ScheduleData *FirstLoadStoreInRegion = nullptr;
2928 
2929     /// The last memory accessing instruction in the scheduling region
2930     /// (can be null).
2931     ScheduleData *LastLoadStoreInRegion = nullptr;
2932 
2933     /// The current size of the scheduling region.
2934     int ScheduleRegionSize = 0;
2935 
2936     /// The maximum size allowed for the scheduling region.
2937     int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget;
2938 
    /// The ID of the scheduling region. For a new vectorization iteration this
    /// is incremented, which "removes" all ScheduleData from the region.
2941     /// Make sure that the initial SchedulingRegionID is greater than the
2942     /// initial SchedulingRegionID in ScheduleData (which is 0).
2943     int SchedulingRegionID = 1;
2944   };
2945 
2946   /// Attaches the BlockScheduling structures to basic blocks.
2947   MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
2948 
2949   /// Performs the "real" scheduling. Done before vectorization is actually
2950   /// performed in a basic block.
2951   void scheduleBlock(BlockScheduling *BS);
2952 
2953   /// List of users to ignore during scheduling and that don't need extracting.
2954   ArrayRef<Value *> UserIgnoreList;
2955 
2956   /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of
2957   /// sorted SmallVectors of unsigned.
2958   struct OrdersTypeDenseMapInfo {
2959     static OrdersType getEmptyKey() {
2960       OrdersType V;
2961       V.push_back(~1U);
2962       return V;
2963     }
2964 
2965     static OrdersType getTombstoneKey() {
2966       OrdersType V;
2967       V.push_back(~2U);
2968       return V;
2969     }
2970 
2971     static unsigned getHashValue(const OrdersType &V) {
2972       return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
2973     }
2974 
2975     static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) {
2976       return LHS == RHS;
2977     }
2978   };
2979 
2980   // Analysis and block reference.
2981   Function *F;
2982   ScalarEvolution *SE;
2983   TargetTransformInfo *TTI;
2984   TargetLibraryInfo *TLI;
2985   LoopInfo *LI;
2986   DominatorTree *DT;
2987   AssumptionCache *AC;
2988   DemandedBits *DB;
2989   MemorySSA *MSSA;
2990   const DataLayout *DL;
2991   OptimizationRemarkEmitter *ORE;
2992 
2993   unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
2994   unsigned MinVecRegSize; // Set by cl::opt (default: 128).
2995 
2996   /// Instruction builder to construct the vectorized tree.
2997   IRBuilder<> Builder;
2998 
2999   /// A map of scalar integer values to the smallest bit width with which they
3000   /// can legally be represented. The values map to (width, signed) pairs,
3001   /// where "width" indicates the minimum bit width and "signed" is True if the
3002   /// value must be signed-extended, rather than zero-extended, back to its
3003   /// original width.
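  /// For example, a value computed in i32 whose result is only ever truncated
  /// to i8 could map to (8, false): it can be narrowed to 8 bits and
  /// zero-extended back to i32 wherever its original width is required.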
3004   MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
3005 };
3006 
3007 } // end namespace slpvectorizer
3008 
3009 template <> struct GraphTraits<BoUpSLP *> {
3010   using TreeEntry = BoUpSLP::TreeEntry;
3011 
3012   /// NodeRef has to be a pointer per the GraphWriter.
3013   using NodeRef = TreeEntry *;
3014 
3015   using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy;
3016 
3017   /// Add the VectorizableTree to the index iterator to be able to return
3018   /// TreeEntry pointers.
3019   struct ChildIteratorType
3020       : public iterator_adaptor_base<
3021             ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> {
3022     ContainerTy &VectorizableTree;
3023 
3024     ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W,
3025                       ContainerTy &VT)
3026         : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}
3027 
3028     NodeRef operator*() { return I->UserTE; }
3029   };
3030 
3031   static NodeRef getEntryNode(BoUpSLP &R) {
3032     return R.VectorizableTree[0].get();
3033   }
3034 
3035   static ChildIteratorType child_begin(NodeRef N) {
3036     return {N->UserTreeIndices.begin(), N->Container};
3037   }
3038 
3039   static ChildIteratorType child_end(NodeRef N) {
3040     return {N->UserTreeIndices.end(), N->Container};
3041   }
3042 
3043   /// For the node iterator we just need to turn the TreeEntry iterator into a
3044   /// TreeEntry* iterator so that it dereferences to NodeRef.
3045   class nodes_iterator {
3046     using ItTy = ContainerTy::iterator;
3047     ItTy It;
3048 
3049   public:
3050     nodes_iterator(const ItTy &It2) : It(It2) {}
3051     NodeRef operator*() { return It->get(); }
3052     nodes_iterator operator++() {
3053       ++It;
3054       return *this;
3055     }
3056     bool operator!=(const nodes_iterator &N2) const { return N2.It != It; }
3057   };
3058 
3059   static nodes_iterator nodes_begin(BoUpSLP *R) {
3060     return nodes_iterator(R->VectorizableTree.begin());
3061   }
3062 
3063   static nodes_iterator nodes_end(BoUpSLP *R) {
3064     return nodes_iterator(R->VectorizableTree.end());
3065   }
3066 
3067   static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
3068 };
3069 
3070 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
3071   using TreeEntry = BoUpSLP::TreeEntry;
3072 
3073   DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
3074 
3075   std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
3076     std::string Str;
3077     raw_string_ostream OS(Str);
3078     if (isSplat(Entry->Scalars))
3079       OS << "<splat> ";
3080     for (auto V : Entry->Scalars) {
3081       OS << *V;
3082       if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) {
3083             return EU.Scalar == V;
3084           }))
3085         OS << " <extract>";
3086       OS << "\n";
3087     }
3088     return Str;
3089   }
3090 
3091   static std::string getNodeAttributes(const TreeEntry *Entry,
3092                                        const BoUpSLP *) {
3093     if (Entry->State == TreeEntry::NeedToGather)
3094       return "color=red";
3095     return "";
3096   }
3097 };
3098 
3099 } // end namespace llvm
3100 
3101 BoUpSLP::~BoUpSLP() {
3102   if (MSSA) {
3103     MemorySSAUpdater MSSAU(MSSA);
3104     for (const auto &Pair : DeletedInstructions) {
3105       if (auto *Access = MSSA->getMemoryAccess(Pair.first))
3106         MSSAU.removeMemoryAccess(Access);
3107     }
3108   }
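  // First drop all references so that instructions in this set that use each
  // other (in any order) can be erased safely in the final pass below.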
3109   for (const auto &Pair : DeletedInstructions) {
    // Replace all remaining uses of the instruction with Undef if it was
    // marked for that on deletion.
3112     if (Pair.getSecond()) {
3113       Value *Undef = UndefValue::get(Pair.getFirst()->getType());
3114       Pair.getFirst()->replaceAllUsesWith(Undef);
3115     }
3116     Pair.getFirst()->dropAllReferences();
3117   }
3118   for (const auto &Pair : DeletedInstructions) {
3119     assert(Pair.getFirst()->use_empty() &&
3120            "trying to erase instruction with users.");
3121     Pair.getFirst()->eraseFromParent();
3122   }
3123 #ifdef EXPENSIVE_CHECKS
3124   // If we could guarantee that this call is not extremely slow, we could
3125   // remove the ifdef limitation (see PR47712).
3126   assert(!verifyFunction(*F, &dbgs()));
3127 #endif
3128 }
3129 
3130 void BoUpSLP::eraseInstructions(ArrayRef<Value *> AV) {
3131   for (auto *V : AV) {
3132     if (auto *I = dyn_cast<Instruction>(V))
3133       eraseInstruction(I, /*ReplaceOpsWithUndef=*/true);
3134   };
3135 }
3136 
3137 /// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses
3138 /// contains original mask for the scalars reused in the node. Procedure
3139 /// transform this mask in accordance with the given \p Mask.
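/// For example (with illustrative values), Reuses = {a, b, c, d} combined with
/// Mask = {2, 0, 1, 3} produces Reuses = {b, c, a, d}: the element previously
/// at position I moves to position Mask[I].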
3140 static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) {
3141   assert(!Mask.empty() && Reuses.size() == Mask.size() &&
3142          "Expected non-empty mask.");
3143   SmallVector<int> Prev(Reuses.begin(), Reuses.end());
3144   Prev.swap(Reuses);
3145   for (unsigned I = 0, E = Prev.size(); I < E; ++I)
3146     if (Mask[I] != UndefMaskElem)
3147       Reuses[Mask[I]] = Prev[I];
3148 }
3149 
3150 /// Reorders the given \p Order according to the given \p Mask. \p Order - is
3151 /// the original order of the scalars. Procedure transforms the provided order
3152 /// in accordance with the given \p Mask. If the resulting \p Order is just an
3153 /// identity order, \p Order is cleared.
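/// For example, an empty \p Order (treated as the identity order) combined
/// with Mask = {1, 0, 3, 2} yields Order = {1, 0, 3, 2}.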
3154 static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) {
3155   assert(!Mask.empty() && "Expected non-empty mask.");
3156   SmallVector<int> MaskOrder;
3157   if (Order.empty()) {
3158     MaskOrder.resize(Mask.size());
3159     std::iota(MaskOrder.begin(), MaskOrder.end(), 0);
3160   } else {
3161     inversePermutation(Order, MaskOrder);
3162   }
3163   reorderReuses(MaskOrder, Mask);
3164   if (ShuffleVectorInst::isIdentityMask(MaskOrder)) {
3165     Order.clear();
3166     return;
3167   }
3168   Order.assign(Mask.size(), Mask.size());
3169   for (unsigned I = 0, E = Mask.size(); I < E; ++I)
3170     if (MaskOrder[I] != UndefMaskElem)
3171       Order[MaskOrder[I]] = I;
3172   fixupOrderingIndices(Order);
3173 }
3174 
3175 Optional<BoUpSLP::OrdersType>
3176 BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) {
3177   assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only.");
3178   unsigned NumScalars = TE.Scalars.size();
3179   OrdersType CurrentOrder(NumScalars, NumScalars);
3180   SmallVector<int> Positions;
3181   SmallBitVector UsedPositions(NumScalars);
3182   const TreeEntry *STE = nullptr;
3183   // Try to find all gathered scalars that are gets vectorized in other
3184   // vectorize node. Here we can have only one single tree vector node to
3185   // correctly identify order of the gathered scalars.
3186   for (unsigned I = 0; I < NumScalars; ++I) {
3187     Value *V = TE.Scalars[I];
3188     if (!isa<LoadInst, ExtractElementInst, ExtractValueInst>(V))
3189       continue;
3190     if (const auto *LocalSTE = getTreeEntry(V)) {
3191       if (!STE)
3192         STE = LocalSTE;
3193       else if (STE != LocalSTE)
3194         // Take the order only from the single vector node.
3195         return None;
3196       unsigned Lane =
3197           std::distance(STE->Scalars.begin(), find(STE->Scalars, V));
3198       if (Lane >= NumScalars)
3199         return None;
3200       if (CurrentOrder[Lane] != NumScalars) {
3201         if (Lane != I)
3202           continue;
3203         UsedPositions.reset(CurrentOrder[Lane]);
3204       }
3205       // The partial identity (where only some elements of the gather node are
3206       // in the identity order) is good.
3207       CurrentOrder[Lane] = I;
3208       UsedPositions.set(I);
3209     }
3210   }
3211   // Need to keep the order if we have a vector entry and at least 2 scalars or
3212   // the vectorized entry has just 2 scalars.
3213   if (STE && (UsedPositions.count() > 1 || STE->Scalars.size() == 2)) {
3214     auto &&IsIdentityOrder = [NumScalars](ArrayRef<unsigned> CurrentOrder) {
3215       for (unsigned I = 0; I < NumScalars; ++I)
3216         if (CurrentOrder[I] != I && CurrentOrder[I] != NumScalars)
3217           return false;
3218       return true;
3219     };
3220     if (IsIdentityOrder(CurrentOrder)) {
3221       CurrentOrder.clear();
3222       return CurrentOrder;
3223     }
3224     auto *It = CurrentOrder.begin();
3225     for (unsigned I = 0; I < NumScalars;) {
3226       if (UsedPositions.test(I)) {
3227         ++I;
3228         continue;
3229       }
3230       if (*It == NumScalars) {
3231         *It = I;
3232         ++I;
3233       }
3234       ++It;
3235     }
3236     return CurrentOrder;
3237   }
3238   return None;
3239 }
3240 
3241 Optional<BoUpSLP::OrdersType> BoUpSLP::getReorderingData(const TreeEntry &TE,
3242                                                          bool TopToBottom) {
3243   // No need to reorder if need to shuffle reuses, still need to shuffle the
3244   // node.
3245   if (!TE.ReuseShuffleIndices.empty())
3246     return None;
3247   if (TE.State == TreeEntry::Vectorize &&
3248       (isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE.getMainOp()) ||
3249        (TopToBottom && isa<StoreInst, InsertElementInst>(TE.getMainOp()))) &&
3250       !TE.isAltShuffle())
3251     return TE.ReorderIndices;
3252   if (TE.State == TreeEntry::NeedToGather) {
3253     // TODO: add analysis of other gather nodes with extractelement
3254     // instructions and other values/instructions, not only undefs.
3255     if (((TE.getOpcode() == Instruction::ExtractElement &&
3256           !TE.isAltShuffle()) ||
3257          (all_of(TE.Scalars,
3258                  [](Value *V) {
3259                    return isa<UndefValue, ExtractElementInst>(V);
3260                  }) &&
3261           any_of(TE.Scalars,
3262                  [](Value *V) { return isa<ExtractElementInst>(V); }))) &&
3263         all_of(TE.Scalars,
3264                [](Value *V) {
3265                  auto *EE = dyn_cast<ExtractElementInst>(V);
3266                  return !EE || isa<FixedVectorType>(EE->getVectorOperandType());
3267                }) &&
3268         allSameType(TE.Scalars)) {
3269       // Check that gather of extractelements can be represented as
3270       // just a shuffle of a single vector.
3271       OrdersType CurrentOrder;
3272       bool Reuse = canReuseExtract(TE.Scalars, TE.getMainOp(), CurrentOrder);
3273       if (Reuse || !CurrentOrder.empty()) {
3274         if (!CurrentOrder.empty())
3275           fixupOrderingIndices(CurrentOrder);
3276         return CurrentOrder;
3277       }
3278     }
3279     if (Optional<OrdersType> CurrentOrder = findReusedOrderedScalars(TE))
3280       return CurrentOrder;
3281   }
3282   return None;
3283 }
3284 
3285 void BoUpSLP::reorderTopToBottom() {
3286   // Maps VF to the graph nodes.
3287   DenseMap<unsigned, SetVector<TreeEntry *>> VFToOrderedEntries;
3288   // ExtractElement gather nodes which can be vectorized and need to handle
3289   // their ordering.
3290   DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
3291   // Find all reorderable nodes with the given VF.
3292   // Currently the are vectorized stores,loads,extracts + some gathering of
3293   // extracts.
3294   for_each(VectorizableTree, [this, &VFToOrderedEntries, &GathersToOrders](
3295                                  const std::unique_ptr<TreeEntry> &TE) {
3296     if (Optional<OrdersType> CurrentOrder =
3297             getReorderingData(*TE.get(), /*TopToBottom=*/true)) {
3298       // Do not include ordering for nodes used in the alt opcode vectorization,
3299       // better to reorder them during bottom-to-top stage. If follow the order
3300       // here, it causes reordering of the whole graph though actually it is
3301       // profitable just to reorder the subgraph that starts from the alternate
3302       // opcode vectorization node. Such nodes already end-up with the shuffle
3303       // instruction and it is just enough to change this shuffle rather than
3304       // rotate the scalars for the whole graph.
3305       unsigned Cnt = 0;
3306       const TreeEntry *UserTE = TE.get();
3307       while (UserTE && Cnt < RecursionMaxDepth) {
3308         if (UserTE->UserTreeIndices.size() != 1)
3309           break;
3310         if (all_of(UserTE->UserTreeIndices, [](const EdgeInfo &EI) {
3311               return EI.UserTE->State == TreeEntry::Vectorize &&
3312                      EI.UserTE->isAltShuffle() && EI.UserTE->Idx != 0;
3313             }))
3314           return;
3315         if (UserTE->UserTreeIndices.empty())
3316           UserTE = nullptr;
3317         else
3318           UserTE = UserTE->UserTreeIndices.back().UserTE;
3319         ++Cnt;
3320       }
3321       VFToOrderedEntries[TE->Scalars.size()].insert(TE.get());
3322       if (TE->State != TreeEntry::Vectorize)
3323         GathersToOrders.try_emplace(TE.get(), *CurrentOrder);
3324     }
3325   });
3326 
3327   // Reorder the graph nodes according to their vectorization factor.
3328   for (unsigned VF = VectorizableTree.front()->Scalars.size(); VF > 1;
3329        VF /= 2) {
3330     auto It = VFToOrderedEntries.find(VF);
3331     if (It == VFToOrderedEntries.end())
3332       continue;
3333     // Try to find the most profitable order. We just are looking for the most
3334     // used order and reorder scalar elements in the nodes according to this
3335     // mostly used order.
3336     ArrayRef<TreeEntry *> OrderedEntries = It->second.getArrayRef();
3337     // All operands are reordered and used only in this node - propagate the
3338     // most used order to the user node.
3339     MapVector<OrdersType, unsigned,
3340               DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
3341         OrdersUses;
3342     SmallPtrSet<const TreeEntry *, 4> VisitedOps;
3343     for (const TreeEntry *OpTE : OrderedEntries) {
3344       // No need to reorder this nodes, still need to extend and to use shuffle,
3345       // just need to merge reordering shuffle and the reuse shuffle.
3346       if (!OpTE->ReuseShuffleIndices.empty())
3347         continue;
3348       // Count number of orders uses.
3349       const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & {
3350         if (OpTE->State == TreeEntry::NeedToGather)
3351           return GathersToOrders.find(OpTE)->second;
3352         return OpTE->ReorderIndices;
3353       }();
3354       // Stores actually store the mask, not the order, need to invert.
3355       if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() &&
3356           OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
3357         SmallVector<int> Mask;
3358         inversePermutation(Order, Mask);
3359         unsigned E = Order.size();
3360         OrdersType CurrentOrder(E, E);
3361         transform(Mask, CurrentOrder.begin(), [E](int Idx) {
3362           return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx);
3363         });
3364         fixupOrderingIndices(CurrentOrder);
3365         ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second;
3366       } else {
3367         ++OrdersUses.insert(std::make_pair(Order, 0)).first->second;
3368       }
3369     }
3370     // Set order of the user node.
3371     if (OrdersUses.empty())
3372       continue;
3373     // Choose the most used order.
3374     ArrayRef<unsigned> BestOrder = OrdersUses.front().first;
3375     unsigned Cnt = OrdersUses.front().second;
3376     for (const auto &Pair : drop_begin(OrdersUses)) {
3377       if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) {
3378         BestOrder = Pair.first;
3379         Cnt = Pair.second;
3380       }
3381     }
3382     // Set order of the user node.
3383     if (BestOrder.empty())
3384       continue;
3385     SmallVector<int> Mask;
3386     inversePermutation(BestOrder, Mask);
3387     SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem);
3388     unsigned E = BestOrder.size();
3389     transform(BestOrder, MaskOrder.begin(), [E](unsigned I) {
3390       return I < E ? static_cast<int>(I) : UndefMaskElem;
3391     });
3392     // Do an actual reordering, if profitable.
3393     for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
3394       // Just do the reordering for the nodes with the given VF.
3395       if (TE->Scalars.size() != VF) {
3396         if (TE->ReuseShuffleIndices.size() == VF) {
3397           // Need to reorder the reuses masks of the operands with smaller VF to
3398           // be able to find the match between the graph nodes and scalar
3399           // operands of the given node during vectorization/cost estimation.
3400           assert(all_of(TE->UserTreeIndices,
3401                         [VF, &TE](const EdgeInfo &EI) {
3402                           return EI.UserTE->Scalars.size() == VF ||
3403                                  EI.UserTE->Scalars.size() ==
3404                                      TE->Scalars.size();
3405                         }) &&
3406                  "All users must be of VF size.");
3407           // Update ordering of the operands with the smaller VF than the given
3408           // one.
3409           reorderReuses(TE->ReuseShuffleIndices, Mask);
3410         }
3411         continue;
3412       }
3413       if (TE->State == TreeEntry::Vectorize &&
3414           isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst,
3415               InsertElementInst>(TE->getMainOp()) &&
3416           !TE->isAltShuffle()) {
3417         // Build correct orders for extract{element,value}, loads and
3418         // stores.
3419         reorderOrder(TE->ReorderIndices, Mask);
3420         if (isa<InsertElementInst, StoreInst>(TE->getMainOp()))
3421           TE->reorderOperands(Mask);
3422       } else {
3423         // Reorder the node and its operands.
3424         TE->reorderOperands(Mask);
3425         assert(TE->ReorderIndices.empty() &&
3426                "Expected empty reorder sequence.");
3427         reorderScalars(TE->Scalars, Mask);
3428       }
3429       if (!TE->ReuseShuffleIndices.empty()) {
3430         // Apply reversed order to keep the original ordering of the reused
3431         // elements to avoid extra reorder indices shuffling.
3432         OrdersType CurrentOrder;
3433         reorderOrder(CurrentOrder, MaskOrder);
3434         SmallVector<int> NewReuses;
3435         inversePermutation(CurrentOrder, NewReuses);
3436         addMask(NewReuses, TE->ReuseShuffleIndices);
3437         TE->ReuseShuffleIndices.swap(NewReuses);
3438       }
3439     }
3440   }
3441 }
3442 
3443 bool BoUpSLP::canReorderOperands(
3444     TreeEntry *UserTE, SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
3445     ArrayRef<TreeEntry *> ReorderableGathers,
3446     SmallVectorImpl<TreeEntry *> &GatherOps) {
3447   for (unsigned I = 0, E = UserTE->getNumOperands(); I < E; ++I) {
3448     if (any_of(Edges, [I](const std::pair<unsigned, TreeEntry *> &OpData) {
3449           return OpData.first == I &&
3450                  OpData.second->State == TreeEntry::Vectorize;
3451         }))
3452       continue;
3453     if (TreeEntry *TE = getVectorizedOperand(UserTE, I)) {
3454       // Do not reorder if operand node is used by many user nodes.
3455       if (any_of(TE->UserTreeIndices,
3456                  [UserTE](const EdgeInfo &EI) { return EI.UserTE != UserTE; }))
3457         return false;
3458       // Add the node to the list of the ordered nodes with the identity
3459       // order.
3460       Edges.emplace_back(I, TE);
3461       continue;
3462     }
3463     ArrayRef<Value *> VL = UserTE->getOperand(I);
3464     TreeEntry *Gather = nullptr;
3465     if (count_if(ReorderableGathers, [VL, &Gather](TreeEntry *TE) {
3466           assert(TE->State != TreeEntry::Vectorize &&
3467                  "Only non-vectorized nodes are expected.");
3468           if (TE->isSame(VL)) {
3469             Gather = TE;
3470             return true;
3471           }
3472           return false;
3473         }) > 1)
3474       return false;
3475     if (Gather)
3476       GatherOps.push_back(Gather);
3477   }
3478   return true;
3479 }
3480 
3481 void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
3482   SetVector<TreeEntry *> OrderedEntries;
3483   DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
3484   // Find all reorderable leaf nodes with the given VF.
3485   // Currently the are vectorized loads,extracts without alternate operands +
3486   // some gathering of extracts.
3487   SmallVector<TreeEntry *> NonVectorized;
3488   for_each(VectorizableTree, [this, &OrderedEntries, &GathersToOrders,
3489                               &NonVectorized](
3490                                  const std::unique_ptr<TreeEntry> &TE) {
3491     if (TE->State != TreeEntry::Vectorize)
3492       NonVectorized.push_back(TE.get());
3493     if (Optional<OrdersType> CurrentOrder =
3494             getReorderingData(*TE.get(), /*TopToBottom=*/false)) {
3495       OrderedEntries.insert(TE.get());
3496       if (TE->State != TreeEntry::Vectorize)
3497         GathersToOrders.try_emplace(TE.get(), *CurrentOrder);
3498     }
3499   });
3500 
3501   // 1. Propagate order to the graph nodes, which use only reordered nodes.
3502   // I.e., if the node has operands, that are reordered, try to make at least
3503   // one operand order in the natural order and reorder others + reorder the
3504   // user node itself.
3505   SmallPtrSet<const TreeEntry *, 4> Visited;
3506   while (!OrderedEntries.empty()) {
3507     // 1. Filter out only reordered nodes.
3508     // 2. If the entry has multiple uses - skip it and jump to the next node.
3509     MapVector<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users;
3510     SmallVector<TreeEntry *> Filtered;
3511     for (TreeEntry *TE : OrderedEntries) {
3512       if (!(TE->State == TreeEntry::Vectorize ||
3513             (TE->State == TreeEntry::NeedToGather &&
3514              GathersToOrders.count(TE))) ||
3515           TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
3516           !all_of(drop_begin(TE->UserTreeIndices),
3517                   [TE](const EdgeInfo &EI) {
3518                     return EI.UserTE == TE->UserTreeIndices.front().UserTE;
3519                   }) ||
3520           !Visited.insert(TE).second) {
3521         Filtered.push_back(TE);
3522         continue;
3523       }
3524       // Build a map between user nodes and their operands order to speedup
3525       // search. The graph currently does not provide this dependency directly.
3526       for (EdgeInfo &EI : TE->UserTreeIndices) {
3527         TreeEntry *UserTE = EI.UserTE;
3528         auto It = Users.find(UserTE);
3529         if (It == Users.end())
3530           It = Users.insert({UserTE, {}}).first;
3531         It->second.emplace_back(EI.EdgeIdx, TE);
3532       }
3533     }
3534     // Erase filtered entries.
3535     for_each(Filtered,
3536              [&OrderedEntries](TreeEntry *TE) { OrderedEntries.remove(TE); });
3537     for (auto &Data : Users) {
3538       // Check that operands are used only in the User node.
3539       SmallVector<TreeEntry *> GatherOps;
3540       if (!canReorderOperands(Data.first, Data.second, NonVectorized,
3541                               GatherOps)) {
3542         for_each(Data.second,
3543                  [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
3544                    OrderedEntries.remove(Op.second);
3545                  });
3546         continue;
3547       }
3548       // All operands are reordered and used only in this node - propagate the
3549       // most used order to the user node.
3550       MapVector<OrdersType, unsigned,
3551                 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
3552           OrdersUses;
3553       // Do the analysis for each tree entry only once, otherwise the order of
3554       // the same node my be considered several times, though might be not
3555       // profitable.
3556       SmallPtrSet<const TreeEntry *, 4> VisitedOps;
3557       SmallPtrSet<const TreeEntry *, 4> VisitedUsers;
3558       for (const auto &Op : Data.second) {
3559         TreeEntry *OpTE = Op.second;
3560         if (!VisitedOps.insert(OpTE).second)
3561           continue;
3562         if (!OpTE->ReuseShuffleIndices.empty() ||
3563             (IgnoreReorder && OpTE == VectorizableTree.front().get()))
3564           continue;
3565         const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & {
3566           if (OpTE->State == TreeEntry::NeedToGather)
3567             return GathersToOrders.find(OpTE)->second;
3568           return OpTE->ReorderIndices;
3569         }();
3570         unsigned NumOps = count_if(
3571             Data.second, [OpTE](const std::pair<unsigned, TreeEntry *> &P) {
3572               return P.second == OpTE;
3573             });
3574         // Stores actually store the mask, not the order, need to invert.
3575         if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() &&
3576             OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
3577           SmallVector<int> Mask;
3578           inversePermutation(Order, Mask);
3579           unsigned E = Order.size();
3580           OrdersType CurrentOrder(E, E);
3581           transform(Mask, CurrentOrder.begin(), [E](int Idx) {
3582             return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx);
3583           });
3584           fixupOrderingIndices(CurrentOrder);
3585           OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second +=
3586               NumOps;
3587         } else {
3588           OrdersUses.insert(std::make_pair(Order, 0)).first->second += NumOps;
3589         }
3590         auto Res = OrdersUses.insert(std::make_pair(OrdersType(), 0));
3591         const auto &&AllowsReordering = [IgnoreReorder, &GathersToOrders](
3592                                             const TreeEntry *TE) {
3593           if (!TE->ReorderIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
3594               (TE->State == TreeEntry::Vectorize && TE->isAltShuffle()) ||
3595               (IgnoreReorder && TE->Idx == 0))
3596             return true;
3597           if (TE->State == TreeEntry::NeedToGather) {
3598             auto It = GathersToOrders.find(TE);
3599             if (It != GathersToOrders.end())
3600               return !It->second.empty();
3601             return true;
3602           }
3603           return false;
3604         };
3605         for (const EdgeInfo &EI : OpTE->UserTreeIndices) {
3606           TreeEntry *UserTE = EI.UserTE;
3607           if (!VisitedUsers.insert(UserTE).second)
3608             continue;
3609           // May reorder user node if it requires reordering, has reused
3610           // scalars, is an alternate op vectorize node or its op nodes require
3611           // reordering.
3612           if (AllowsReordering(UserTE))
3613             continue;
3614           // Check if users allow reordering.
3615           // Currently look up just 1 level of operands to avoid increase of
3616           // the compile time.
3617           // Profitable to reorder if definitely more operands allow
3618           // reordering rather than those with natural order.
3619           ArrayRef<std::pair<unsigned, TreeEntry *>> Ops = Users[UserTE];
3620           if (static_cast<unsigned>(count_if(
3621                   Ops, [UserTE, &AllowsReordering](
3622                            const std::pair<unsigned, TreeEntry *> &Op) {
3623                     return AllowsReordering(Op.second) &&
3624                            all_of(Op.second->UserTreeIndices,
3625                                   [UserTE](const EdgeInfo &EI) {
3626                                     return EI.UserTE == UserTE;
3627                                   });
3628                   })) <= Ops.size() / 2)
3629             ++Res.first->second;
3630         }
3631       }
3632       // If no orders - skip current nodes and jump to the next one, if any.
3633       if (OrdersUses.empty()) {
3634         for_each(Data.second,
3635                  [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
3636                    OrderedEntries.remove(Op.second);
3637                  });
3638         continue;
3639       }
3640       // Choose the best order.
3641       ArrayRef<unsigned> BestOrder = OrdersUses.front().first;
3642       unsigned Cnt = OrdersUses.front().second;
3643       for (const auto &Pair : drop_begin(OrdersUses)) {
3644         if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) {
3645           BestOrder = Pair.first;
3646           Cnt = Pair.second;
3647         }
3648       }
3649       // Set order of the user node (reordering of operands and user nodes).
3650       if (BestOrder.empty()) {
3651         for_each(Data.second,
3652                  [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
3653                    OrderedEntries.remove(Op.second);
3654                  });
3655         continue;
3656       }
3657       // Erase operands from OrderedEntries list and adjust their orders.
3658       VisitedOps.clear();
3659       SmallVector<int> Mask;
3660       inversePermutation(BestOrder, Mask);
3661       SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem);
3662       unsigned E = BestOrder.size();
3663       transform(BestOrder, MaskOrder.begin(), [E](unsigned I) {
3664         return I < E ? static_cast<int>(I) : UndefMaskElem;
3665       });
3666       for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) {
3667         TreeEntry *TE = Op.second;
3668         OrderedEntries.remove(TE);
3669         if (!VisitedOps.insert(TE).second)
3670           continue;
3671         if (TE->ReuseShuffleIndices.size() == BestOrder.size()) {
3672           // Just reorder reuses indices.
3673           reorderReuses(TE->ReuseShuffleIndices, Mask);
3674           continue;
3675         }
3676         // Gathers are processed separately.
3677         if (TE->State != TreeEntry::Vectorize)
3678           continue;
3679         assert((BestOrder.size() == TE->ReorderIndices.size() ||
3680                 TE->ReorderIndices.empty()) &&
3681                "Non-matching sizes of user/operand entries.");
3682         reorderOrder(TE->ReorderIndices, Mask);
3683       }
3684       // For gathers just need to reorder its scalars.
3685       for (TreeEntry *Gather : GatherOps) {
3686         assert(Gather->ReorderIndices.empty() &&
3687                "Unexpected reordering of gathers.");
3688         if (!Gather->ReuseShuffleIndices.empty()) {
3689           // Just reorder reuses indices.
3690           reorderReuses(Gather->ReuseShuffleIndices, Mask);
3691           continue;
3692         }
3693         reorderScalars(Gather->Scalars, Mask);
3694         OrderedEntries.remove(Gather);
3695       }
3696       // Reorder operands of the user node and set the ordering for the user
3697       // node itself.
3698       if (Data.first->State != TreeEntry::Vectorize ||
3699           !isa<ExtractElementInst, ExtractValueInst, LoadInst>(
3700               Data.first->getMainOp()) ||
3701           Data.first->isAltShuffle())
3702         Data.first->reorderOperands(Mask);
3703       if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) ||
3704           Data.first->isAltShuffle()) {
3705         reorderScalars(Data.first->Scalars, Mask);
3706         reorderOrder(Data.first->ReorderIndices, MaskOrder);
3707         if (Data.first->ReuseShuffleIndices.empty() &&
3708             !Data.first->ReorderIndices.empty() &&
3709             !Data.first->isAltShuffle()) {
3710           // Insert user node to the list to try to sink reordering deeper in
3711           // the graph.
3712           OrderedEntries.insert(Data.first);
3713         }
3714       } else {
3715         reorderOrder(Data.first->ReorderIndices, Mask);
3716       }
3717     }
3718   }
3719   // If the reordering is unnecessary, just remove the reorder.
3720   if (IgnoreReorder && !VectorizableTree.front()->ReorderIndices.empty() &&
3721       VectorizableTree.front()->ReuseShuffleIndices.empty())
3722     VectorizableTree.front()->ReorderIndices.clear();
3723 }
3724 
3725 void BoUpSLP::buildExternalUses(
3726     const ExtraValueToDebugLocsMap &ExternallyUsedValues) {
3727   // Collect the values that we need to extract from the tree.
3728   for (auto &TEPtr : VectorizableTree) {
3729     TreeEntry *Entry = TEPtr.get();
3730 
3731     // No need to handle users of gathered values.
3732     if (Entry->State == TreeEntry::NeedToGather)
3733       continue;
3734 
3735     // For each lane:
3736     for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
3737       Value *Scalar = Entry->Scalars[Lane];
3738       int FoundLane = Entry->findLaneForValue(Scalar);
3739 
3740       // Check if the scalar is externally used as an extra arg.
3741       auto ExtI = ExternallyUsedValues.find(Scalar);
3742       if (ExtI != ExternallyUsedValues.end()) {
3743         LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane "
3744                           << Lane << " from " << *Scalar << ".\n");
3745         ExternalUses.emplace_back(Scalar, nullptr, FoundLane);
3746       }
3747       for (User *U : Scalar->users()) {
3748         LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
3749 
3750         Instruction *UserInst = dyn_cast<Instruction>(U);
3751         if (!UserInst)
3752           continue;
3753 
3754         if (isDeleted(UserInst))
3755           continue;
3756 
3757         // Skip in-tree scalars that become vectors
3758         if (TreeEntry *UseEntry = getTreeEntry(U)) {
3759           Value *UseScalar = UseEntry->Scalars[0];
3760           // Some in-tree scalars will remain as scalar in vectorized
3761           // instructions. If that is the case, the one in Lane 0 will
3762           // be used.
3763           if (UseScalar != U ||
3764               UseEntry->State == TreeEntry::ScatterVectorize ||
3765               !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
3766             LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
3767                               << ".\n");
3768             assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state");
3769             continue;
3770           }
3771         }
3772 
3773         // Ignore users in the user ignore list.
3774         if (is_contained(UserIgnoreList, UserInst))
3775           continue;
3776 
3777         LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane "
3778                           << Lane << " from " << *Scalar << ".\n");
3779         ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane));
3780       }
3781     }
3782   }
3783 }
3784 
3785 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
3786                         ArrayRef<Value *> UserIgnoreLst) {
3787   deleteTree();
3788   UserIgnoreList = UserIgnoreLst;
3789   if (!allSameType(Roots))
3790     return;
3791   buildTree_rec(Roots, 0, EdgeInfo());
3792 }
3793 
3794 namespace {
3795 /// Tracks the state we can represent the loads in the given sequence.
3796 enum class LoadsState { Gather, Vectorize, ScatterVectorize };
3797 } // anonymous namespace
3798 
3799 /// Checks if the given array of loads can be represented as a vectorized,
3800 /// scatter or just simple gather.
3801 static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0,
3802                                     const TargetTransformInfo &TTI,
3803                                     const DataLayout &DL, ScalarEvolution &SE,
3804                                     SmallVectorImpl<unsigned> &Order,
3805                                     SmallVectorImpl<Value *> &PointerOps) {
3806   // Check that a vectorized load would load the same memory as a scalar
3807   // load. For example, we don't want to vectorize loads that are smaller
3808   // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
3809   // treats loading/storing it as an i8 struct. If we vectorize loads/stores
3810   // from such a struct, we read/write packed bits disagreeing with the
3811   // unvectorized version.
3812   Type *ScalarTy = VL0->getType();
3813 
3814   if (DL.getTypeSizeInBits(ScalarTy) != DL.getTypeAllocSizeInBits(ScalarTy))
3815     return LoadsState::Gather;
3816 
3817   // Make sure all loads in the bundle are simple - we can't vectorize
3818   // atomic or volatile loads.
3819   PointerOps.clear();
3820   PointerOps.resize(VL.size());
3821   auto *POIter = PointerOps.begin();
3822   for (Value *V : VL) {
3823     auto *L = cast<LoadInst>(V);
3824     if (!L->isSimple())
3825       return LoadsState::Gather;
3826     *POIter = L->getPointerOperand();
3827     ++POIter;
3828   }
3829 
3830   Order.clear();
3831   // Check the order of pointer operands.
3832   if (llvm::sortPtrAccesses(PointerOps, ScalarTy, DL, SE, Order)) {
3833     Value *Ptr0;
3834     Value *PtrN;
3835     if (Order.empty()) {
3836       Ptr0 = PointerOps.front();
3837       PtrN = PointerOps.back();
3838     } else {
3839       Ptr0 = PointerOps[Order.front()];
3840       PtrN = PointerOps[Order.back()];
3841     }
    Optional<int> Diff =
        getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE);
    // Check that the sorted loads are consecutive.
    if (Diff && static_cast<unsigned>(*Diff) == VL.size() - 1)
      return LoadsState::Vectorize;
3847     Align CommonAlignment = cast<LoadInst>(VL0)->getAlign();
3848     for (Value *V : VL)
3849       CommonAlignment =
3850           commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign());
3851     if (TTI.isLegalMaskedGather(FixedVectorType::get(ScalarTy, VL.size()),
3852                                 CommonAlignment))
3853       return LoadsState::ScatterVectorize;
3854   }
3855 
3856   return LoadsState::Gather;
3857 }
3858 
3859 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
3860                             const EdgeInfo &UserTreeIdx) {
3861   assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
3862 
3863   SmallVector<int> ReuseShuffleIndicies;
3864   SmallVector<Value *> UniqueValues;
3865   auto &&TryToFindDuplicates = [&VL, &ReuseShuffleIndicies, &UniqueValues,
3866                                 &UserTreeIdx,
3867                                 this](const InstructionsState &S) {
3868     // Check that every instruction appears once in this bundle.
3869     DenseMap<Value *, unsigned> UniquePositions;
3870     for (Value *V : VL) {
3871       if (isConstant(V)) {
3872         ReuseShuffleIndicies.emplace_back(
3873             isa<UndefValue>(V) ? UndefMaskElem : UniqueValues.size());
3874         UniqueValues.emplace_back(V);
3875         continue;
3876       }
3877       auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
3878       ReuseShuffleIndicies.emplace_back(Res.first->second);
3879       if (Res.second)
3880         UniqueValues.emplace_back(V);
3881     }
3882     size_t NumUniqueScalarValues = UniqueValues.size();
3883     if (NumUniqueScalarValues == VL.size()) {
3884       ReuseShuffleIndicies.clear();
3885     } else {
3886       LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n");
3887       if (NumUniqueScalarValues <= 1 ||
3888           (UniquePositions.size() == 1 && all_of(UniqueValues,
3889                                                  [](Value *V) {
3890                                                    return isa<UndefValue>(V) ||
3891                                                           !isConstant(V);
3892                                                  })) ||
3893           !llvm::isPowerOf2_32(NumUniqueScalarValues)) {
3894         LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
3895         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
3896         return false;
3897       }
3898       VL = UniqueValues;
3899     }
3900     return true;
3901   };
3902 
3903   InstructionsState S = getSameOpcode(VL);
3904   if (Depth == RecursionMaxDepth) {
3905     LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
3906     if (TryToFindDuplicates(S))
3907       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3908                    ReuseShuffleIndicies);
3909     return;
3910   }
3911 
  // Don't handle scalable vectors.
3913   if (S.getOpcode() == Instruction::ExtractElement &&
3914       isa<ScalableVectorType>(
3915           cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) {
3916     LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n");
3917     if (TryToFindDuplicates(S))
3918       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3919                    ReuseShuffleIndicies);
3920     return;
3921   }
3922 
3923   // Don't handle vectors.
3924   if (S.OpValue->getType()->isVectorTy() &&
3925       !isa<InsertElementInst>(S.OpValue)) {
3926     LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
3927     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
3928     return;
3929   }
3930 
3931   // Avoid attempting to schedule allocas; there are unmodeled dependencies
3932   // for "static" alloca status and for reordering with stacksave calls.
3933   for (Value *V : VL) {
3934     if (isa<AllocaInst>(V)) {
3935       LLVM_DEBUG(dbgs() << "SLP: Gathering due to alloca.\n");
3936       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
3937       return;
3938     }
3939   }
3940 
3941   if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
3942     if (SI->getValueOperand()->getType()->isVectorTy()) {
3943       LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
3944       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
3945       return;
3946     }
3947 
  // If all of the operands are identical or constant, we have a simple
  // solution. If we deal with insert/extract instructions, they must all have
  // constant indices, otherwise we should gather them, not try to vectorize.
3951   if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode() ||
3952       (isa<InsertElementInst, ExtractValueInst, ExtractElementInst>(S.MainOp) &&
3953        !all_of(VL, isVectorLikeInstWithConstOps))) {
3954     LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
3955     if (TryToFindDuplicates(S))
3956       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3957                    ReuseShuffleIndicies);
3958     return;
3959   }
3960 
3961   // We now know that this is a vector of instructions of the same type from
3962   // the same block.
3963 
3964   // Don't vectorize ephemeral values.
3965   for (Value *V : VL) {
3966     if (EphValues.count(V)) {
3967       LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
3968                         << ") is ephemeral.\n");
3969       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
3970       return;
3971     }
3972   }
3973 
3974   // Check if this is a duplicate of another entry.
3975   if (TreeEntry *E = getTreeEntry(S.OpValue)) {
3976     LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n");
3977     if (!E->isSame(VL)) {
3978       LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
3979       if (TryToFindDuplicates(S))
3980         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3981                      ReuseShuffleIndicies);
3982       return;
3983     }
    // Record the reuse of the tree node. FIXME: currently this is only used
    // to properly draw the graph rather than for the actual vectorization.
3986     E->UserTreeIndices.push_back(UserTreeIdx);
3987     LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue
3988                       << ".\n");
3989     return;
3990   }
3991 
3992   // Check that none of the instructions in the bundle are already in the tree.
3993   for (Value *V : VL) {
3994     auto *I = dyn_cast<Instruction>(V);
3995     if (!I)
3996       continue;
3997     if (getTreeEntry(I)) {
3998       LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
3999                         << ") is already in tree.\n");
4000       if (TryToFindDuplicates(S))
4001         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4002                      ReuseShuffleIndicies);
4003       return;
4004     }
4005   }
4006 
  // The reduction nodes (stored in UserIgnoreList) should also stay scalar.
4008   for (Value *V : VL) {
4009     if (is_contained(UserIgnoreList, V)) {
4010       LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
4011       if (TryToFindDuplicates(S))
4012         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4013                      ReuseShuffleIndicies);
4014       return;
4015     }
4016   }
4017 
4018   // Check that all of the users of the scalars that we want to vectorize are
4019   // schedulable.
4020   auto *VL0 = cast<Instruction>(S.OpValue);
4021   BasicBlock *BB = VL0->getParent();
4022 
4023   if (!DT->isReachableFromEntry(BB)) {
4024     // Don't go into unreachable blocks. They may contain instructions with
4025     // dependency cycles which confuse the final scheduling.
4026     LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
4027     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
4028     return;
4029   }
4030 
4031   // Check that every instruction appears once in this bundle.
4032   if (!TryToFindDuplicates(S))
4033     return;
4034 
4035   auto &BSRef = BlocksSchedules[BB];
4036   if (!BSRef)
4037     BSRef = std::make_unique<BlockScheduling>(BB);
4038 
4039   BlockScheduling &BS = *BSRef.get();
4040 
4041   Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S);
4042 #ifdef EXPENSIVE_CHECKS
4043   // Make sure we didn't break any internal invariants
4044   BS.verify();
4045 #endif
4046   if (!Bundle) {
4047     LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
4048     assert((!BS.getScheduleData(VL0) ||
4049             !BS.getScheduleData(VL0)->isPartOfBundle()) &&
4050            "tryScheduleBundle should cancelScheduling on failure");
4051     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4052                  ReuseShuffleIndicies);
4053     return;
4054   }
4055   LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
4056 
4057   unsigned ShuffleOrOp = S.isAltShuffle() ?
4058                 (unsigned) Instruction::ShuffleVector : S.getOpcode();
4059   switch (ShuffleOrOp) {
4060     case Instruction::PHI: {
4061       auto *PH = cast<PHINode>(VL0);
4062 
4063       // Check for terminator values (e.g. invoke).
4064       for (Value *V : VL)
4065         for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) {
4066           Instruction *Term = dyn_cast<Instruction>(
4067               cast<PHINode>(V)->getIncomingValueForBlock(
4068                   PH->getIncomingBlock(I)));
4069           if (Term && Term->isTerminator()) {
4070             LLVM_DEBUG(dbgs()
4071                        << "SLP: Need to swizzle PHINodes (terminator use).\n");
4072             BS.cancelScheduling(VL, VL0);
4073             newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4074                          ReuseShuffleIndicies);
4075             return;
4076           }
4077         }
4078 
4079       TreeEntry *TE =
4080           newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies);
4081       LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
4082 
4083       // Keeps the reordered operands to avoid code duplication.
4084       SmallVector<ValueList, 2> OperandsVec;
4085       for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) {
4086         if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) {
4087           ValueList Operands(VL.size(), PoisonValue::get(PH->getType()));
4088           TE->setOperand(I, Operands);
4089           OperandsVec.push_back(Operands);
4090           continue;
4091         }
4092         ValueList Operands;
4093         // Prepare the operand vector.
4094         for (Value *V : VL)
4095           Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(
4096               PH->getIncomingBlock(I)));
4097         TE->setOperand(I, Operands);
4098         OperandsVec.push_back(Operands);
4099       }
4100       for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx)
4101         buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx});
4102       return;
4103     }
4104     case Instruction::ExtractValue:
4105     case Instruction::ExtractElement: {
4106       OrdersType CurrentOrder;
4107       bool Reuse = canReuseExtract(VL, VL0, CurrentOrder);
4108       if (Reuse) {
4109         LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
4110         newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
4111                      ReuseShuffleIndicies);
4112         // This is a special case, as it does not gather, but at the same time
4113         // we are not extending buildTree_rec() towards the operands.
4114         ValueList Op0;
4115         Op0.assign(VL.size(), VL0->getOperand(0));
4116         VectorizableTree.back()->setOperand(0, Op0);
4117         return;
4118       }
4119       if (!CurrentOrder.empty()) {
4120         LLVM_DEBUG({
4121           dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
4122                     "with order";
4123           for (unsigned Idx : CurrentOrder)
4124             dbgs() << " " << Idx;
4125           dbgs() << "\n";
4126         });
4127         fixupOrderingIndices(CurrentOrder);
        // Create a new tree entry that keeps the reordering required to make
        // the extract sequence consecutive.
4130         newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
4131                      ReuseShuffleIndicies, CurrentOrder);
4132         // This is a special case, as it does not gather, but at the same time
4133         // we are not extending buildTree_rec() towards the operands.
4134         ValueList Op0;
4135         Op0.assign(VL.size(), VL0->getOperand(0));
4136         VectorizableTree.back()->setOperand(0, Op0);
4137         return;
4138       }
4139       LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
4140       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4141                    ReuseShuffleIndicies);
4142       BS.cancelScheduling(VL, VL0);
4143       return;
4144     }
4145     case Instruction::InsertElement: {
4146       assert(ReuseShuffleIndicies.empty() && "All inserts should be unique");
4147 
4148       // Check that we have a buildvector and not a shuffle of 2 or more
4149       // different vectors.
4150       ValueSet SourceVectors;
4151       for (Value *V : VL) {
4152         SourceVectors.insert(cast<Instruction>(V)->getOperand(0));
4153         assert(getInsertIndex(V) != None && "Non-constant or undef index?");
4154       }
4155 
4156       if (count_if(VL, [&SourceVectors](Value *V) {
4157             return !SourceVectors.contains(V);
4158           }) >= 2) {
4159         // Found 2nd source vector - cancel.
4160         LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with "
4161                              "different source vectors.\n");
4162         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
4163         BS.cancelScheduling(VL, VL0);
4164         return;
4165       }
4166 
4167       auto OrdCompare = [](const std::pair<int, int> &P1,
4168                            const std::pair<int, int> &P2) {
4169         return P1.first > P2.first;
4170       };
4171       PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>,
4172                     decltype(OrdCompare)>
4173           Indices(OrdCompare);
4174       for (int I = 0, E = VL.size(); I < E; ++I) {
4175         unsigned Idx = *getInsertIndex(VL[I]);
4176         Indices.emplace(Idx, I);
4177       }
4178       OrdersType CurrentOrder(VL.size(), VL.size());
4179       bool IsIdentity = true;
4180       for (int I = 0, E = VL.size(); I < E; ++I) {
4181         CurrentOrder[Indices.top().second] = I;
4182         IsIdentity &= Indices.top().second == I;
4183         Indices.pop();
4184       }
4185       if (IsIdentity)
4186         CurrentOrder.clear();
4187       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
4188                                    None, CurrentOrder);
4189       LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n");
4190 
4191       constexpr int NumOps = 2;
4192       ValueList VectorOperands[NumOps];
4193       for (int I = 0; I < NumOps; ++I) {
4194         for (Value *V : VL)
4195           VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I));
4196 
4197         TE->setOperand(I, VectorOperands[I]);
4198       }
4199       buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1});
4200       return;
4201     }
4202     case Instruction::Load: {
4203       // Check that a vectorized load would load the same memory as a scalar
4204       // load. For example, we don't want to vectorize loads that are smaller
4205       // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
4206       // treats loading/storing it as an i8 struct. If we vectorize loads/stores
4207       // from such a struct, we read/write packed bits disagreeing with the
4208       // unvectorized version.
4209       SmallVector<Value *> PointerOps;
4210       OrdersType CurrentOrder;
4211       TreeEntry *TE = nullptr;
4212       switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, CurrentOrder,
4213                                 PointerOps)) {
4214       case LoadsState::Vectorize:
4215         if (CurrentOrder.empty()) {
          // Original loads are consecutive and do not require reordering.
4217           TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
4218                             ReuseShuffleIndicies);
4219           LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
4220         } else {
4221           fixupOrderingIndices(CurrentOrder);
4222           // Need to reorder.
4223           TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
4224                             ReuseShuffleIndicies, CurrentOrder);
4225           LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
4226         }
4227         TE->setOperandsInOrder();
4228         break;
4229       case LoadsState::ScatterVectorize:
4230         // Vectorizing non-consecutive loads with `llvm.masked.gather`.
4231         TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S,
4232                           UserTreeIdx, ReuseShuffleIndicies);
4233         TE->setOperandsInOrder();
4234         buildTree_rec(PointerOps, Depth + 1, {TE, 0});
4235         LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n");
4236         break;
4237       case LoadsState::Gather:
4238         BS.cancelScheduling(VL, VL0);
4239         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4240                      ReuseShuffleIndicies);
4241 #ifndef NDEBUG
4242         Type *ScalarTy = VL0->getType();
4243         if (DL->getTypeSizeInBits(ScalarTy) !=
4244             DL->getTypeAllocSizeInBits(ScalarTy))
4245           LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
4246         else if (any_of(VL, [](Value *V) {
4247                    return !cast<LoadInst>(V)->isSimple();
4248                  }))
4249           LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
4250         else
4251           LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
4252 #endif // NDEBUG
4253         break;
4254       }
4255       return;
4256     }
4257     case Instruction::ZExt:
4258     case Instruction::SExt:
4259     case Instruction::FPToUI:
4260     case Instruction::FPToSI:
4261     case Instruction::FPExt:
4262     case Instruction::PtrToInt:
4263     case Instruction::IntToPtr:
4264     case Instruction::SIToFP:
4265     case Instruction::UIToFP:
4266     case Instruction::Trunc:
4267     case Instruction::FPTrunc:
4268     case Instruction::BitCast: {
4269       Type *SrcTy = VL0->getOperand(0)->getType();
4270       for (Value *V : VL) {
4271         Type *Ty = cast<Instruction>(V)->getOperand(0)->getType();
4272         if (Ty != SrcTy || !isValidElementType(Ty)) {
4273           BS.cancelScheduling(VL, VL0);
4274           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4275                        ReuseShuffleIndicies);
4276           LLVM_DEBUG(dbgs()
4277                      << "SLP: Gathering casts with different src types.\n");
4278           return;
4279         }
4280       }
4281       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
4282                                    ReuseShuffleIndicies);
4283       LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");
4284 
4285       TE->setOperandsInOrder();
4286       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
4287         ValueList Operands;
4288         // Prepare the operand vector.
4289         for (Value *V : VL)
4290           Operands.push_back(cast<Instruction>(V)->getOperand(i));
4291 
4292         buildTree_rec(Operands, Depth + 1, {TE, i});
4293       }
4294       return;
4295     }
4296     case Instruction::ICmp:
4297     case Instruction::FCmp: {
4298       // Check that all of the compares have the same predicate.
4299       CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
4300       CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0);
4301       Type *ComparedTy = VL0->getOperand(0)->getType();
4302       for (Value *V : VL) {
4303         CmpInst *Cmp = cast<CmpInst>(V);
4304         if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) ||
4305             Cmp->getOperand(0)->getType() != ComparedTy) {
4306           BS.cancelScheduling(VL, VL0);
4307           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4308                        ReuseShuffleIndicies);
4309           LLVM_DEBUG(dbgs()
4310                      << "SLP: Gathering cmp with different predicate.\n");
4311           return;
4312         }
4313       }
4314 
4315       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
4316                                    ReuseShuffleIndicies);
4317       LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n");
4318 
4319       ValueList Left, Right;
4320       if (cast<CmpInst>(VL0)->isCommutative()) {
4321         // Commutative predicate - collect + sort operands of the instructions
4322         // so that each side is more likely to have the same opcode.
4323         assert(P0 == SwapP0 && "Commutative Predicate mismatch");
4324         reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
4325       } else {
4326         // Collect operands - commute if it uses the swapped predicate.
4327         for (Value *V : VL) {
4328           auto *Cmp = cast<CmpInst>(V);
4329           Value *LHS = Cmp->getOperand(0);
4330           Value *RHS = Cmp->getOperand(1);
4331           if (Cmp->getPredicate() != P0)
4332             std::swap(LHS, RHS);
4333           Left.push_back(LHS);
4334           Right.push_back(RHS);
4335         }
4336       }
4337       TE->setOperand(0, Left);
4338       TE->setOperand(1, Right);
4339       buildTree_rec(Left, Depth + 1, {TE, 0});
4340       buildTree_rec(Right, Depth + 1, {TE, 1});
4341       return;
4342     }
4343     case Instruction::Select:
4344     case Instruction::FNeg:
4345     case Instruction::Add:
4346     case Instruction::FAdd:
4347     case Instruction::Sub:
4348     case Instruction::FSub:
4349     case Instruction::Mul:
4350     case Instruction::FMul:
4351     case Instruction::UDiv:
4352     case Instruction::SDiv:
4353     case Instruction::FDiv:
4354     case Instruction::URem:
4355     case Instruction::SRem:
4356     case Instruction::FRem:
4357     case Instruction::Shl:
4358     case Instruction::LShr:
4359     case Instruction::AShr:
4360     case Instruction::And:
4361     case Instruction::Or:
4362     case Instruction::Xor: {
4363       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
4364                                    ReuseShuffleIndicies);
4365       LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n");
4366 
4367       // Sort operands of the instructions so that each side is more likely to
4368       // have the same opcode.
4369       if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
4370         ValueList Left, Right;
4371         reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
4372         TE->setOperand(0, Left);
4373         TE->setOperand(1, Right);
4374         buildTree_rec(Left, Depth + 1, {TE, 0});
4375         buildTree_rec(Right, Depth + 1, {TE, 1});
4376         return;
4377       }
4378 
4379       TE->setOperandsInOrder();
4380       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
4381         ValueList Operands;
4382         // Prepare the operand vector.
4383         for (Value *V : VL)
4384           Operands.push_back(cast<Instruction>(V)->getOperand(i));
4385 
4386         buildTree_rec(Operands, Depth + 1, {TE, i});
4387       }
4388       return;
4389     }
4390     case Instruction::GetElementPtr: {
4391       // We don't combine GEPs with complicated (nested) indexing.
4392       for (Value *V : VL) {
4393         if (cast<Instruction>(V)->getNumOperands() != 2) {
4394           LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
4395           BS.cancelScheduling(VL, VL0);
4396           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4397                        ReuseShuffleIndicies);
4398           return;
4399         }
4400       }
4401 
4402       // We can't combine several GEPs into one vector if they operate on
4403       // different types.
4404       Type *Ty0 = cast<GEPOperator>(VL0)->getSourceElementType();
4405       for (Value *V : VL) {
4406         Type *CurTy = cast<GEPOperator>(V)->getSourceElementType();
4407         if (Ty0 != CurTy) {
4408           LLVM_DEBUG(dbgs()
4409                      << "SLP: not-vectorizable GEP (different types).\n");
4410           BS.cancelScheduling(VL, VL0);
4411           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4412                        ReuseShuffleIndicies);
4413           return;
4414         }
4415       }
4416 
4417       // We don't combine GEPs with non-constant indexes.
4418       Type *Ty1 = VL0->getOperand(1)->getType();
4419       for (Value *V : VL) {
4420         auto Op = cast<Instruction>(V)->getOperand(1);
4421         if (!isa<ConstantInt>(Op) ||
4422             (Op->getType() != Ty1 &&
4423              Op->getType()->getScalarSizeInBits() >
4424                  DL->getIndexSizeInBits(
4425                      V->getType()->getPointerAddressSpace()))) {
4426           LLVM_DEBUG(dbgs()
4427                      << "SLP: not-vectorizable GEP (non-constant indexes).\n");
4428           BS.cancelScheduling(VL, VL0);
4429           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4430                        ReuseShuffleIndicies);
4431           return;
4432         }
4433       }
4434 
4435       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
4436                                    ReuseShuffleIndicies);
4437       LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
4438       SmallVector<ValueList, 2> Operands(2);
4439       // Prepare the operand vector for pointer operands.
4440       for (Value *V : VL)
4441         Operands.front().push_back(
4442             cast<GetElementPtrInst>(V)->getPointerOperand());
4443       TE->setOperand(0, Operands.front());
      // Need to cast all indices to the same type before vectorization to
      // avoid a crash.
4446       // Required to be able to find correct matches between different gather
4447       // nodes and reuse the vectorized values rather than trying to gather them
4448       // again.
4449       int IndexIdx = 1;
4450       Type *VL0Ty = VL0->getOperand(IndexIdx)->getType();
4451       Type *Ty = all_of(VL,
4452                         [VL0Ty, IndexIdx](Value *V) {
4453                           return VL0Ty == cast<GetElementPtrInst>(V)
4454                                               ->getOperand(IndexIdx)
4455                                               ->getType();
4456                         })
4457                      ? VL0Ty
4458                      : DL->getIndexType(cast<GetElementPtrInst>(VL0)
4459                                             ->getPointerOperandType()
4460                                             ->getScalarType());
4461       // Prepare the operand vector.
4462       for (Value *V : VL) {
4463         auto *Op = cast<Instruction>(V)->getOperand(IndexIdx);
4464         auto *CI = cast<ConstantInt>(Op);
4465         Operands.back().push_back(ConstantExpr::getIntegerCast(
4466             CI, Ty, CI->getValue().isSignBitSet()));
4467       }
4468       TE->setOperand(IndexIdx, Operands.back());
4469 
4470       for (unsigned I = 0, Ops = Operands.size(); I < Ops; ++I)
4471         buildTree_rec(Operands[I], Depth + 1, {TE, I});
4472       return;
4473     }
4474     case Instruction::Store: {
4475       // Check if the stores are consecutive or if we need to swizzle them.
4476       llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType();
4477       // Avoid types that are padded when being allocated as scalars, while
4478       // being packed together in a vector (such as i1).
4479       if (DL->getTypeSizeInBits(ScalarTy) !=
4480           DL->getTypeAllocSizeInBits(ScalarTy)) {
4481         BS.cancelScheduling(VL, VL0);
4482         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4483                      ReuseShuffleIndicies);
4484         LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n");
4485         return;
4486       }
4487       // Make sure all stores in the bundle are simple - we can't vectorize
4488       // atomic or volatile stores.
4489       SmallVector<Value *, 4> PointerOps(VL.size());
4490       ValueList Operands(VL.size());
4491       auto POIter = PointerOps.begin();
4492       auto OIter = Operands.begin();
4493       for (Value *V : VL) {
4494         auto *SI = cast<StoreInst>(V);
4495         if (!SI->isSimple()) {
4496           BS.cancelScheduling(VL, VL0);
4497           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4498                        ReuseShuffleIndicies);
4499           LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n");
4500           return;
4501         }
4502         *POIter = SI->getPointerOperand();
4503         *OIter = SI->getValueOperand();
4504         ++POIter;
4505         ++OIter;
4506       }
4507 
4508       OrdersType CurrentOrder;
4509       // Check the order of pointer operands.
4510       if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) {
4511         Value *Ptr0;
4512         Value *PtrN;
4513         if (CurrentOrder.empty()) {
4514           Ptr0 = PointerOps.front();
4515           PtrN = PointerOps.back();
4516         } else {
4517           Ptr0 = PointerOps[CurrentOrder.front()];
4518           PtrN = PointerOps[CurrentOrder.back()];
4519         }
        Optional<int> Dist =
            getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE);
        // Check that the sorted pointer operands are consecutive.
        if (Dist && static_cast<unsigned>(*Dist) == VL.size() - 1) {
4524           if (CurrentOrder.empty()) {
            // Original stores are consecutive and do not require reordering.
4526             TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
4527                                          UserTreeIdx, ReuseShuffleIndicies);
4528             TE->setOperandsInOrder();
4529             buildTree_rec(Operands, Depth + 1, {TE, 0});
4530             LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
4531           } else {
4532             fixupOrderingIndices(CurrentOrder);
4533             TreeEntry *TE =
4534                 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
4535                              ReuseShuffleIndicies, CurrentOrder);
4536             TE->setOperandsInOrder();
4537             buildTree_rec(Operands, Depth + 1, {TE, 0});
4538             LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n");
4539           }
4540           return;
4541         }
4542       }
4543 
4544       BS.cancelScheduling(VL, VL0);
4545       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4546                    ReuseShuffleIndicies);
4547       LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
4548       return;
4549     }
4550     case Instruction::Call: {
4551       // Check if the calls are all to the same vectorizable intrinsic or
4552       // library function.
4553       CallInst *CI = cast<CallInst>(VL0);
4554       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4555 
4556       VFShape Shape = VFShape::get(
4557           *CI, ElementCount::getFixed(static_cast<unsigned int>(VL.size())),
4558           false /*HasGlobalPred*/);
4559       Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
4560 
4561       if (!VecFunc && !isTriviallyVectorizable(ID)) {
4562         BS.cancelScheduling(VL, VL0);
4563         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4564                      ReuseShuffleIndicies);
4565         LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
4566         return;
4567       }
4568       Function *F = CI->getCalledFunction();
4569       unsigned NumArgs = CI->arg_size();
4570       SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr);
4571       for (unsigned j = 0; j != NumArgs; ++j)
4572         if (hasVectorInstrinsicScalarOpd(ID, j))
4573           ScalarArgs[j] = CI->getArgOperand(j);
4574       for (Value *V : VL) {
4575         CallInst *CI2 = dyn_cast<CallInst>(V);
4576         if (!CI2 || CI2->getCalledFunction() != F ||
4577             getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
4578             (VecFunc &&
4579              VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) ||
4580             !CI->hasIdenticalOperandBundleSchema(*CI2)) {
4581           BS.cancelScheduling(VL, VL0);
4582           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4583                        ReuseShuffleIndicies);
4584           LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V
4585                             << "\n");
4586           return;
4587         }
        // Some intrinsics have scalar arguments, and those must be the same
        // across the bundle for the calls to be vectorized.
4590         for (unsigned j = 0; j != NumArgs; ++j) {
4591           if (hasVectorInstrinsicScalarOpd(ID, j)) {
4592             Value *A1J = CI2->getArgOperand(j);
4593             if (ScalarArgs[j] != A1J) {
4594               BS.cancelScheduling(VL, VL0);
4595               newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4596                            ReuseShuffleIndicies);
4597               LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
4598                                 << " argument " << ScalarArgs[j] << "!=" << A1J
4599                                 << "\n");
4600               return;
4601             }
4602           }
4603         }
4604         // Verify that the bundle operands are identical between the two calls.
4605         if (CI->hasOperandBundles() &&
4606             !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
4607                         CI->op_begin() + CI->getBundleOperandsEndIndex(),
4608                         CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
4609           BS.cancelScheduling(VL, VL0);
4610           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4611                        ReuseShuffleIndicies);
4612           LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
4613                             << *CI << "!=" << *V << '\n');
4614           return;
4615         }
4616       }
4617 
4618       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
4619                                    ReuseShuffleIndicies);
4620       TE->setOperandsInOrder();
4621       for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
        // For scalar operands there is no need to create an entry since they
        // are not vectorized.
4624         if (hasVectorInstrinsicScalarOpd(ID, i))
4625           continue;
4626         ValueList Operands;
4627         // Prepare the operand vector.
4628         for (Value *V : VL) {
4629           auto *CI2 = cast<CallInst>(V);
4630           Operands.push_back(CI2->getArgOperand(i));
4631         }
4632         buildTree_rec(Operands, Depth + 1, {TE, i});
4633       }
4634       return;
4635     }
4636     case Instruction::ShuffleVector: {
      // If this is not an alternate sequence of opcodes (like add-sub),
      // do not vectorize this instruction.
4639       if (!S.isAltShuffle()) {
4640         BS.cancelScheduling(VL, VL0);
4641         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4642                      ReuseShuffleIndicies);
4643         LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
4644         return;
4645       }
4646       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
4647                                    ReuseShuffleIndicies);
4648       LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
4649 
4650       // Reorder operands if reordering would enable vectorization.
4651       auto *CI = dyn_cast<CmpInst>(VL0);
4652       if (isa<BinaryOperator>(VL0) || CI) {
4653         ValueList Left, Right;
4654         if (!CI || all_of(VL, [](Value *V) {
4655               return cast<CmpInst>(V)->isCommutative();
4656             })) {
4657           reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
4658         } else {
4659           CmpInst::Predicate P0 = CI->getPredicate();
4660           CmpInst::Predicate AltP0 = cast<CmpInst>(S.AltOp)->getPredicate();
4661           assert(P0 != AltP0 &&
4662                  "Expected different main/alternate predicates.");
4663           CmpInst::Predicate AltP0Swapped = CmpInst::getSwappedPredicate(AltP0);
4664           Value *BaseOp0 = VL0->getOperand(0);
4665           Value *BaseOp1 = VL0->getOperand(1);
4666           // Collect operands - commute if it uses the swapped predicate or
4667           // alternate operation.
4668           for (Value *V : VL) {
4669             auto *Cmp = cast<CmpInst>(V);
4670             Value *LHS = Cmp->getOperand(0);
4671             Value *RHS = Cmp->getOperand(1);
4672             CmpInst::Predicate CurrentPred = Cmp->getPredicate();
4673             if (P0 == AltP0Swapped) {
4674               if (CI != Cmp && S.AltOp != Cmp &&
4675                   ((P0 == CurrentPred &&
4676                     !areCompatibleCmpOps(BaseOp0, BaseOp1, LHS, RHS)) ||
4677                    (AltP0 == CurrentPred &&
4678                     areCompatibleCmpOps(BaseOp0, BaseOp1, LHS, RHS))))
4679                 std::swap(LHS, RHS);
4680             } else if (P0 != CurrentPred && AltP0 != CurrentPred) {
4681               std::swap(LHS, RHS);
4682             }
4683             Left.push_back(LHS);
4684             Right.push_back(RHS);
4685           }
4686         }
4687         TE->setOperand(0, Left);
4688         TE->setOperand(1, Right);
4689         buildTree_rec(Left, Depth + 1, {TE, 0});
4690         buildTree_rec(Right, Depth + 1, {TE, 1});
4691         return;
4692       }
4693 
4694       TE->setOperandsInOrder();
4695       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
4696         ValueList Operands;
4697         // Prepare the operand vector.
4698         for (Value *V : VL)
4699           Operands.push_back(cast<Instruction>(V)->getOperand(i));
4700 
4701         buildTree_rec(Operands, Depth + 1, {TE, i});
4702       }
4703       return;
4704     }
4705     default:
4706       BS.cancelScheduling(VL, VL0);
4707       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4708                    ReuseShuffleIndicies);
4709       LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
4710       return;
4711   }
4712 }
4713 
4714 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
4715   unsigned N = 1;
4716   Type *EltTy = T;
4717 
4718   while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) ||
4719          isa<VectorType>(EltTy)) {
4720     if (auto *ST = dyn_cast<StructType>(EltTy)) {
4721       // Check that struct is homogeneous.
4722       for (const auto *Ty : ST->elements())
4723         if (Ty != *ST->element_begin())
4724           return 0;
4725       N *= ST->getNumElements();
4726       EltTy = *ST->element_begin();
4727     } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) {
4728       N *= AT->getNumElements();
4729       EltTy = AT->getElementType();
4730     } else {
4731       auto *VT = cast<FixedVectorType>(EltTy);
4732       N *= VT->getNumElements();
4733       EltTy = VT->getElementType();
4734     }
4735   }
4736 
4737   if (!isValidElementType(EltTy))
4738     return 0;
4739   uint64_t VTSize = DL.getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N));
  if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize ||
      VTSize != DL.getTypeStoreSizeInBits(T))
4741     return 0;
4742   return N;
4743 }
4744 
4745 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
4746                               SmallVectorImpl<unsigned> &CurrentOrder) const {
4747   const auto *It = find_if(VL, [](Value *V) {
4748     return isa<ExtractElementInst, ExtractValueInst>(V);
4749   });
4750   assert(It != VL.end() && "Expected at least one extract instruction.");
4751   auto *E0 = cast<Instruction>(*It);
4752   assert(all_of(VL,
4753                 [](Value *V) {
4754                   return isa<UndefValue, ExtractElementInst, ExtractValueInst>(
4755                       V);
4756                 }) &&
4757          "Invalid opcode");
4758   // Check if all of the extracts come from the same vector and from the
4759   // correct offset.
4760   Value *Vec = E0->getOperand(0);
4761 
4762   CurrentOrder.clear();
4763 
  // We have to extract from a vector/aggregate with the same number of
  // elements.
4765   unsigned NElts;
4766   if (E0->getOpcode() == Instruction::ExtractValue) {
4767     const DataLayout &DL = E0->getModule()->getDataLayout();
4768     NElts = canMapToVector(Vec->getType(), DL);
4769     if (!NElts)
4770       return false;
4771     // Check if load can be rewritten as load of vector.
4772     LoadInst *LI = dyn_cast<LoadInst>(Vec);
4773     if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
4774       return false;
4775   } else {
4776     NElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
4777   }
4778 
4779   if (NElts != VL.size())
4780     return false;
4781 
4782   // Check that all of the indices extract from the correct offset.
4783   bool ShouldKeepOrder = true;
4784   unsigned E = VL.size();
  // Assign to all items the initial value E so we can check if the extract
  // instruction index was used already.
  // Also, later we can check that all the indices are used and we have a
  // consecutive access in the extract instructions, by checking that no
  // element of CurrentOrder still has value E.
4790   CurrentOrder.assign(E, E);
4791   unsigned I = 0;
4792   for (; I < E; ++I) {
4793     auto *Inst = dyn_cast<Instruction>(VL[I]);
4794     if (!Inst)
4795       continue;
4796     if (Inst->getOperand(0) != Vec)
4797       break;
4798     if (auto *EE = dyn_cast<ExtractElementInst>(Inst))
4799       if (isa<UndefValue>(EE->getIndexOperand()))
4800         continue;
4801     Optional<unsigned> Idx = getExtractIndex(Inst);
4802     if (!Idx)
4803       break;
4804     const unsigned ExtIdx = *Idx;
4805     if (ExtIdx != I) {
4806       if (ExtIdx >= E || CurrentOrder[ExtIdx] != E)
4807         break;
4808       ShouldKeepOrder = false;
4809       CurrentOrder[ExtIdx] = I;
4810     } else {
4811       if (CurrentOrder[I] != E)
4812         break;
4813       CurrentOrder[I] = I;
4814     }
4815   }
4816   if (I < E) {
4817     CurrentOrder.clear();
4818     return false;
4819   }
4820   if (ShouldKeepOrder)
4821     CurrentOrder.clear();
4822 
4823   return ShouldKeepOrder;
4824 }
4825 
4826 bool BoUpSLP::areAllUsersVectorized(Instruction *I,
4827                                     ArrayRef<Value *> VectorizedVals) const {
4828   return (I->hasOneUse() && is_contained(VectorizedVals, I)) ||
4829          all_of(I->users(), [this](User *U) {
4830            return ScalarToTreeEntry.count(U) > 0 ||
4831                   isVectorLikeInstWithConstOps(U) ||
4832                   (isa<ExtractElementInst>(U) && MustGather.contains(U));
4833          });
4834 }
4835 
4836 static std::pair<InstructionCost, InstructionCost>
4837 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy,
4838                    TargetTransformInfo *TTI, TargetLibraryInfo *TLI) {
4839   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4840 
4841   // Calculate the cost of the scalar and vector calls.
4842   SmallVector<Type *, 4> VecTys;
4843   for (Use &Arg : CI->args())
4844     VecTys.push_back(
4845         FixedVectorType::get(Arg->getType(), VecTy->getNumElements()));
4846   FastMathFlags FMF;
4847   if (auto *FPCI = dyn_cast<FPMathOperator>(CI))
4848     FMF = FPCI->getFastMathFlags();
4849   SmallVector<const Value *> Arguments(CI->args());
4850   IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF,
4851                                     dyn_cast<IntrinsicInst>(CI));
4852   auto IntrinsicCost =
4853     TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput);
4854 
4855   auto Shape = VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
4856                                      VecTy->getNumElements())),
4857                             false /*HasGlobalPred*/);
4858   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
4859   auto LibCost = IntrinsicCost;
4860   if (!CI->isNoBuiltin() && VecFunc) {
4861     // Calculate the cost of the vector library call.
4862     // If the corresponding vector call is cheaper, return its cost.
4863     LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys,
4864                                     TTI::TCK_RecipThroughput);
4865   }
4866   return {IntrinsicCost, LibCost};
4867 }
4868 
4869 /// Compute the cost of creating a vector of type \p VecTy containing the
4870 /// extracted values from \p VL.
4871 static InstructionCost
4872 computeExtractCost(ArrayRef<Value *> VL, FixedVectorType *VecTy,
4873                    TargetTransformInfo::ShuffleKind ShuffleKind,
4874                    ArrayRef<int> Mask, TargetTransformInfo &TTI) {
4875   unsigned NumOfParts = TTI.getNumberOfParts(VecTy);
4876 
4877   if (ShuffleKind != TargetTransformInfo::SK_PermuteSingleSrc || !NumOfParts ||
4878       VecTy->getNumElements() < NumOfParts)
4879     return TTI.getShuffleCost(ShuffleKind, VecTy, Mask);
4880 
4881   bool AllConsecutive = true;
4882   unsigned EltsPerVector = VecTy->getNumElements() / NumOfParts;
4883   unsigned Idx = -1;
4884   InstructionCost Cost = 0;
4885 
4886   // Process extracts in blocks of EltsPerVector to check if the source vector
4887   // operand can be re-used directly. If not, add the cost of creating a shuffle
4888   // to extract the values into a vector register.
4889   for (auto *V : VL) {
4890     ++Idx;
4891 
4892     // Need to exclude undefs from analysis.
4893     if (isa<UndefValue>(V) || Mask[Idx] == UndefMaskElem)
4894       continue;
4895 
    // Reached the start of a new vector register.
4897     if (Idx % EltsPerVector == 0) {
4898       AllConsecutive = true;
4899       continue;
4900     }
4901 
    // Check whether all extracts for the current vector register on the
    // target directly extract the values in order.
4904     unsigned CurrentIdx = *getExtractIndex(cast<Instruction>(V));
4905     if (!isa<UndefValue>(VL[Idx - 1]) && Mask[Idx - 1] != UndefMaskElem) {
4906       unsigned PrevIdx = *getExtractIndex(cast<Instruction>(VL[Idx - 1]));
4907       AllConsecutive &= PrevIdx + 1 == CurrentIdx &&
4908                         CurrentIdx % EltsPerVector == Idx % EltsPerVector;
4909     }
4910 
4911     if (AllConsecutive)
4912       continue;
4913 
4914     // Skip all indices, except for the last index per vector block.
4915     if ((Idx + 1) % EltsPerVector != 0 && Idx + 1 != VL.size())
4916       continue;
4917 
    // If we have a series of extracts which are not consecutive and hence
    // cannot re-use the source vector register directly, compute the shuffle
    // cost to extract a vector with EltsPerVector elements.
4921     Cost += TTI.getShuffleCost(
4922         TargetTransformInfo::SK_PermuteSingleSrc,
4923         FixedVectorType::get(VecTy->getElementType(), EltsPerVector));
4924   }
4925   return Cost;
4926 }
4927 
/// Build the shuffle mask for a shuffle graph entry and, if requested, the
/// lists of main and alternate operation operands.
4930 static void
4931 buildShuffleEntryMask(ArrayRef<Value *> VL, ArrayRef<unsigned> ReorderIndices,
4932                       ArrayRef<int> ReusesIndices,
4933                       const function_ref<bool(Instruction *)> IsAltOp,
4934                       SmallVectorImpl<int> &Mask,
4935                       SmallVectorImpl<Value *> *OpScalars = nullptr,
4936                       SmallVectorImpl<Value *> *AltScalars = nullptr) {
4937   unsigned Sz = VL.size();
4938   Mask.assign(Sz, UndefMaskElem);
4939   SmallVector<int> OrderMask;
4940   if (!ReorderIndices.empty())
4941     inversePermutation(ReorderIndices, OrderMask);
4942   for (unsigned I = 0; I < Sz; ++I) {
4943     unsigned Idx = I;
4944     if (!ReorderIndices.empty())
4945       Idx = OrderMask[I];
4946     auto *OpInst = cast<Instruction>(VL[Idx]);
4947     if (IsAltOp(OpInst)) {
4948       Mask[I] = Sz + Idx;
4949       if (AltScalars)
4950         AltScalars->push_back(OpInst);
4951     } else {
4952       Mask[I] = Idx;
4953       if (OpScalars)
4954         OpScalars->push_back(OpInst);
4955     }
4956   }
4957   if (!ReusesIndices.empty()) {
4958     SmallVector<int> NewMask(ReusesIndices.size(), UndefMaskElem);
4959     transform(ReusesIndices, NewMask.begin(), [&Mask](int Idx) {
4960       return Idx != UndefMaskElem ? Mask[Idx] : UndefMaskElem;
4961     });
4962     Mask.swap(NewMask);
4963   }
4964 }
4965 
4966 /// Checks if the specified instruction \p I is an alternate operation for the
4967 /// given \p MainOp and \p AltOp instructions.
4968 static bool isAlternateInstruction(const Instruction *I,
4969                                    const Instruction *MainOp,
4970                                    const Instruction *AltOp) {
4971   if (auto *CI0 = dyn_cast<CmpInst>(MainOp)) {
4972     auto *AltCI0 = cast<CmpInst>(AltOp);
4973     auto *CI = cast<CmpInst>(I);
4974     CmpInst::Predicate P0 = CI0->getPredicate();
4975     CmpInst::Predicate AltP0 = AltCI0->getPredicate();
4976     assert(P0 != AltP0 && "Expected different main/alternate predicates.");
4977     CmpInst::Predicate AltP0Swapped = CmpInst::getSwappedPredicate(AltP0);
4978     CmpInst::Predicate CurrentPred = CI->getPredicate();
4979     if (P0 == AltP0Swapped)
4980       return I == AltCI0 ||
4981              (I != MainOp &&
4982               !areCompatibleCmpOps(CI0->getOperand(0), CI0->getOperand(1),
4983                                    CI->getOperand(0), CI->getOperand(1)));
4984     return AltP0 == CurrentPred || AltP0Swapped == CurrentPred;
4985   }
4986   return I->getOpcode() == AltOp->getOpcode();
4987 }
4988 
4989 InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
4990                                       ArrayRef<Value *> VectorizedVals) {
4991   ArrayRef<Value*> VL = E->Scalars;
4992 
4993   Type *ScalarTy = VL[0]->getType();
4994   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
4995     ScalarTy = SI->getValueOperand()->getType();
4996   else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0]))
4997     ScalarTy = CI->getOperand(0)->getType();
4998   else if (auto *IE = dyn_cast<InsertElementInst>(VL[0]))
4999     ScalarTy = IE->getOperand(1)->getType();
5000   auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
5001   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
5002 
5003   // If we have computed a smaller type for the expression, update VecTy so
5004   // that the costs will be accurate.
5005   if (MinBWs.count(VL[0]))
5006     VecTy = FixedVectorType::get(
5007         IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size());
5008   unsigned EntryVF = E->getVectorFactor();
5009   auto *FinalVecTy = FixedVectorType::get(VecTy->getElementType(), EntryVF);
5010 
5011   bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
5012   // FIXME: it tries to fix a problem with MSVC buildbots.
5013   TargetTransformInfo &TTIRef = *TTI;
5014   auto &&AdjustExtractsCost = [this, &TTIRef, CostKind, VL, VecTy,
5015                                VectorizedVals, E](InstructionCost &Cost) {
5016     DenseMap<Value *, int> ExtractVectorsTys;
5017     SmallPtrSet<Value *, 4> CheckedExtracts;
5018     for (auto *V : VL) {
5019       if (isa<UndefValue>(V))
5020         continue;
5021       // If all users of instruction are going to be vectorized and this
5022       // instruction itself is not going to be vectorized, consider this
5023       // instruction as dead and remove its cost from the final cost of the
5024       // vectorized tree.
5025       // Also, avoid adjusting the cost for extractelements with multiple uses
5026       // in different graph entries.
5027       const TreeEntry *VE = getTreeEntry(V);
5028       if (!CheckedExtracts.insert(V).second ||
5029           !areAllUsersVectorized(cast<Instruction>(V), VectorizedVals) ||
5030           (VE && VE != E))
5031         continue;
5032       auto *EE = cast<ExtractElementInst>(V);
5033       Optional<unsigned> EEIdx = getExtractIndex(EE);
5034       if (!EEIdx)
5035         continue;
5036       unsigned Idx = *EEIdx;
5037       if (TTIRef.getNumberOfParts(VecTy) !=
5038           TTIRef.getNumberOfParts(EE->getVectorOperandType())) {
5039         auto It =
5040             ExtractVectorsTys.try_emplace(EE->getVectorOperand(), Idx).first;
5041         It->getSecond() = std::min<int>(It->second, Idx);
5042       }
5043       // Take credit for instruction that will become dead.
5044       if (EE->hasOneUse()) {
5045         Instruction *Ext = EE->user_back();
5046         if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
5047             all_of(Ext->users(),
5048                    [](User *U) { return isa<GetElementPtrInst>(U); })) {
5049           // Use getExtractWithExtendCost() to calculate the cost of
5050           // extractelement/ext pair.
5051           Cost -=
5052               TTIRef.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(),
5053                                               EE->getVectorOperandType(), Idx);
5054           // Add back the cost of s|zext which is subtracted separately.
5055           Cost += TTIRef.getCastInstrCost(
5056               Ext->getOpcode(), Ext->getType(), EE->getType(),
5057               TTI::getCastContextHint(Ext), CostKind, Ext);
5058           continue;
5059         }
5060       }
5061       Cost -= TTIRef.getVectorInstrCost(Instruction::ExtractElement,
5062                                         EE->getVectorOperandType(), Idx);
5063     }
5064     // Add a cost for subvector extracts/inserts if required.
5065     for (const auto &Data : ExtractVectorsTys) {
5066       auto *EEVTy = cast<FixedVectorType>(Data.first->getType());
5067       unsigned NumElts = VecTy->getNumElements();
5068       if (Data.second % NumElts == 0)
5069         continue;
5070       if (TTIRef.getNumberOfParts(EEVTy) > TTIRef.getNumberOfParts(VecTy)) {
5071         unsigned Idx = (Data.second / NumElts) * NumElts;
5072         unsigned EENumElts = EEVTy->getNumElements();
5073         if (Idx + NumElts <= EENumElts) {
5074           Cost +=
5075               TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
5076                                     EEVTy, None, Idx, VecTy);
5077         } else {
          // Clamp the subvector size to avoid a crash in the cost model
          // functions: make SubVT so that Idx + the VF of SubVT is
          // <= EENumElts.
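          // Worked example with hypothetical sizes: EENumElts = 20,
          // NumElts = 8, Idx = 16. Since Idx + NumElts = 24 > 20, SubVT is
          // shrunk to 20 - 16 = 4 elements so the extract stays in bounds.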
5081           auto *SubVT =
5082               FixedVectorType::get(VecTy->getElementType(), EENumElts - Idx);
5083           Cost +=
5084               TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
5085                                     EEVTy, None, Idx, SubVT);
5086         }
5087       } else {
5088         Cost += TTIRef.getShuffleCost(TargetTransformInfo::SK_InsertSubvector,
5089                                       VecTy, None, 0, EEVTy);
5090       }
5091     }
5092   };
5093   if (E->State == TreeEntry::NeedToGather) {
5094     if (allConstant(VL))
5095       return 0;
5096     if (isa<InsertElementInst>(VL[0]))
5097       return InstructionCost::getInvalid();
5098     SmallVector<int> Mask;
5099     SmallVector<const TreeEntry *> Entries;
5100     Optional<TargetTransformInfo::ShuffleKind> Shuffle =
5101         isGatherShuffledEntry(E, Mask, Entries);
5102     if (Shuffle.hasValue()) {
5103       InstructionCost GatherCost = 0;
5104       if (ShuffleVectorInst::isIdentityMask(Mask)) {
5105         // Perfect match in the graph, will reuse the previously vectorized
5106         // node. Cost is 0.
5107         LLVM_DEBUG(
5108             dbgs()
5109             << "SLP: perfect diamond match for gather bundle that starts with "
5110             << *VL.front() << ".\n");
5111         if (NeedToShuffleReuses)
5112           GatherCost =
5113               TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
5114                                   FinalVecTy, E->ReuseShuffleIndices);
5115       } else {
5116         LLVM_DEBUG(dbgs() << "SLP: shuffled " << Entries.size()
5117                           << " entries for bundle that starts with "
5118                           << *VL.front() << ".\n");
        // Detected that instead of a gather we can emit a shuffle of one or
        // two previously vectorized nodes. Add the cost of the permutation
        // rather than the gather.
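        // Illustrative example (hypothetical nodes): gathering
        // {a0, b1, a2, b3} from already vectorized nodes A = {a0..a3} and
        // B = {b0..b3} becomes a two-source permute with mask <0, 5, 2, 7>
        // instead of a full gather sequence.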
5122         ::addMask(Mask, E->ReuseShuffleIndices);
5123         GatherCost = TTI->getShuffleCost(*Shuffle, FinalVecTy, Mask);
5124       }
5125       return GatherCost;
5126     }
5127     if ((E->getOpcode() == Instruction::ExtractElement ||
5128          all_of(E->Scalars,
5129                 [](Value *V) {
5130                   return isa<ExtractElementInst, UndefValue>(V);
5131                 })) &&
5132         allSameType(VL)) {
      // Check that the gather of extractelements can be represented as just a
      // shuffle of one or two vectors the scalars are extracted from.
5135       SmallVector<int> Mask;
5136       Optional<TargetTransformInfo::ShuffleKind> ShuffleKind =
5137           isFixedVectorShuffle(VL, Mask);
5138       if (ShuffleKind.hasValue()) {
        // Found a bunch of extractelement instructions that must be gathered
        // into a vector and can be represented as a permutation of elements
        // from one or two input vectors.
5142         InstructionCost Cost =
5143             computeExtractCost(VL, VecTy, *ShuffleKind, Mask, *TTI);
5144         AdjustExtractsCost(Cost);
5145         if (NeedToShuffleReuses)
5146           Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
5147                                       FinalVecTy, E->ReuseShuffleIndices);
5148         return Cost;
5149       }
5150     }
5151     if (isSplat(VL)) {
      // Found a broadcast of a single scalar; calculate the cost as a
      // broadcast shuffle.
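      // E.g. VL = {%x, %x, %x, %x} is modeled as a single SK_Broadcast
      // shuffle of %x rather than a per-element gather.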
5154       assert(VecTy == FinalVecTy &&
5155              "No reused scalars expected for broadcast.");
5156       return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy);
5157     }
5158     InstructionCost ReuseShuffleCost = 0;
5159     if (NeedToShuffleReuses)
5160       ReuseShuffleCost = TTI->getShuffleCost(
5161           TTI::SK_PermuteSingleSrc, FinalVecTy, E->ReuseShuffleIndices);
    // Improve the gather cost for a gather of loads, if we can group some of
    // the loads into vector loads.
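    // Illustrative example (hypothetical layout): for 8 gathered loads where
    // VL[0..3] happen to be consecutive in memory, the loop below costs
    // VL[0..3] as one <4 x T> vector load and only the rest as a gather.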
5164     if (VL.size() > 2 && E->getOpcode() == Instruction::Load &&
5165         !E->isAltShuffle()) {
5166       BoUpSLP::ValueSet VectorizedLoads;
5167       unsigned StartIdx = 0;
5168       unsigned VF = VL.size() / 2;
5169       unsigned VectorizedCnt = 0;
5170       unsigned ScatterVectorizeCnt = 0;
5171       const unsigned Sz = DL->getTypeSizeInBits(E->getMainOp()->getType());
5172       for (unsigned MinVF = getMinVF(2 * Sz); VF >= MinVF; VF /= 2) {
5173         for (unsigned Cnt = StartIdx, End = VL.size(); Cnt + VF <= End;
5174              Cnt += VF) {
5175           ArrayRef<Value *> Slice = VL.slice(Cnt, VF);
5176           if (!VectorizedLoads.count(Slice.front()) &&
5177               !VectorizedLoads.count(Slice.back()) && allSameBlock(Slice)) {
5178             SmallVector<Value *> PointerOps;
5179             OrdersType CurrentOrder;
5180             LoadsState LS = canVectorizeLoads(Slice, Slice.front(), *TTI, *DL,
5181                                               *SE, CurrentOrder, PointerOps);
5182             switch (LS) {
5183             case LoadsState::Vectorize:
5184             case LoadsState::ScatterVectorize:
5185               // Mark the vectorized loads so that we don't vectorize them
5186               // again.
5187               if (LS == LoadsState::Vectorize)
5188                 ++VectorizedCnt;
5189               else
5190                 ++ScatterVectorizeCnt;
5191               VectorizedLoads.insert(Slice.begin(), Slice.end());
              // If we vectorized the initial block, no need to try to
              // vectorize it again.
5194               if (Cnt == StartIdx)
5195                 StartIdx += VF;
5196               break;
5197             case LoadsState::Gather:
5198               break;
5199             }
5200           }
5201         }
5202         // Check if the whole array was vectorized already - exit.
5203         if (StartIdx >= VL.size())
5204           break;
5205         // Found vectorizable parts - exit.
5206         if (!VectorizedLoads.empty())
5207           break;
5208       }
5209       if (!VectorizedLoads.empty()) {
5210         InstructionCost GatherCost = 0;
5211         unsigned NumParts = TTI->getNumberOfParts(VecTy);
5212         bool NeedInsertSubvectorAnalysis =
5213             !NumParts || (VL.size() / VF) > NumParts;
5214         // Get the cost for gathered loads.
5215         for (unsigned I = 0, End = VL.size(); I < End; I += VF) {
5216           if (VectorizedLoads.contains(VL[I]))
5217             continue;
5218           GatherCost += getGatherCost(VL.slice(I, VF));
5219         }
5220         // The cost for vectorized loads.
5221         InstructionCost ScalarsCost = 0;
5222         for (Value *V : VectorizedLoads) {
5223           auto *LI = cast<LoadInst>(V);
5224           ScalarsCost += TTI->getMemoryOpCost(
5225               Instruction::Load, LI->getType(), LI->getAlign(),
5226               LI->getPointerAddressSpace(), CostKind, LI);
5227         }
5228         auto *LI = cast<LoadInst>(E->getMainOp());
5229         auto *LoadTy = FixedVectorType::get(LI->getType(), VF);
5230         Align Alignment = LI->getAlign();
5231         GatherCost +=
5232             VectorizedCnt *
5233             TTI->getMemoryOpCost(Instruction::Load, LoadTy, Alignment,
5234                                  LI->getPointerAddressSpace(), CostKind, LI);
5235         GatherCost += ScatterVectorizeCnt *
5236                       TTI->getGatherScatterOpCost(
5237                           Instruction::Load, LoadTy, LI->getPointerOperand(),
5238                           /*VariableMask=*/false, Alignment, CostKind, LI);
5239         if (NeedInsertSubvectorAnalysis) {
5240           // Add the cost for the subvectors insert.
5241           for (int I = VF, E = VL.size(); I < E; I += VF)
5242             GatherCost += TTI->getShuffleCost(TTI::SK_InsertSubvector, VecTy,
5243                                               None, I, LoadTy);
5244         }
5245         return ReuseShuffleCost + GatherCost - ScalarsCost;
5246       }
5247     }
5248     return ReuseShuffleCost + getGatherCost(VL);
5249   }
5250   InstructionCost CommonCost = 0;
5251   SmallVector<int> Mask;
5252   if (!E->ReorderIndices.empty()) {
5253     SmallVector<int> NewMask;
5254     if (E->getOpcode() == Instruction::Store) {
5255       // For stores the order is actually a mask.
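      // E.g. ReorderIndices = {2, 0, 1} is used directly as the mask for
      // stores, while for other nodes the inverse permutation {1, 2, 0} is
      // needed to restore the original lane order.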
5256       NewMask.resize(E->ReorderIndices.size());
5257       copy(E->ReorderIndices, NewMask.begin());
5258     } else {
5259       inversePermutation(E->ReorderIndices, NewMask);
5260     }
5261     ::addMask(Mask, NewMask);
5262   }
5263   if (NeedToShuffleReuses)
5264     ::addMask(Mask, E->ReuseShuffleIndices);
5265   if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask))
5266     CommonCost =
5267         TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask);
5268   assert((E->State == TreeEntry::Vectorize ||
5269           E->State == TreeEntry::ScatterVectorize) &&
5270          "Unhandled state");
5271   assert(E->getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
5272   Instruction *VL0 = E->getMainOp();
5273   unsigned ShuffleOrOp =
5274       E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
5275   switch (ShuffleOrOp) {
5276     case Instruction::PHI:
5277       return 0;
5278 
5279     case Instruction::ExtractValue:
5280     case Instruction::ExtractElement: {
      // The common cost of removing the ExtractElement/ExtractValue
      // instructions + the cost of shuffles, if required to reshuffle the
      // original vector.
5283       if (NeedToShuffleReuses) {
5284         unsigned Idx = 0;
5285         for (unsigned I : E->ReuseShuffleIndices) {
5286           if (ShuffleOrOp == Instruction::ExtractElement) {
5287             auto *EE = cast<ExtractElementInst>(VL[I]);
5288             CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
5289                                                   EE->getVectorOperandType(),
5290                                                   *getExtractIndex(EE));
5291           } else {
5292             CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
5293                                                   VecTy, Idx);
5294             ++Idx;
5295           }
5296         }
5297         Idx = EntryVF;
5298         for (Value *V : VL) {
5299           if (ShuffleOrOp == Instruction::ExtractElement) {
5300             auto *EE = cast<ExtractElementInst>(V);
5301             CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
5302                                                   EE->getVectorOperandType(),
5303                                                   *getExtractIndex(EE));
5304           } else {
5305             --Idx;
5306             CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
5307                                                   VecTy, Idx);
5308           }
5309         }
5310       }
5311       if (ShuffleOrOp == Instruction::ExtractValue) {
5312         for (unsigned I = 0, E = VL.size(); I < E; ++I) {
5313           auto *EI = cast<Instruction>(VL[I]);
          // Take credit for the instruction that will become dead.
5315           if (EI->hasOneUse()) {
5316             Instruction *Ext = EI->user_back();
5317             if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
5318                 all_of(Ext->users(),
5319                        [](User *U) { return isa<GetElementPtrInst>(U); })) {
5320               // Use getExtractWithExtendCost() to calculate the cost of
5321               // extractelement/ext pair.
5322               CommonCost -= TTI->getExtractWithExtendCost(
5323                   Ext->getOpcode(), Ext->getType(), VecTy, I);
5324               // Add back the cost of s|zext which is subtracted separately.
5325               CommonCost += TTI->getCastInstrCost(
5326                   Ext->getOpcode(), Ext->getType(), EI->getType(),
5327                   TTI::getCastContextHint(Ext), CostKind, Ext);
5328               continue;
5329             }
5330           }
5331           CommonCost -=
5332               TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, I);
5333         }
5334       } else {
5335         AdjustExtractsCost(CommonCost);
5336       }
5337       return CommonCost;
5338     }
5339     case Instruction::InsertElement: {
      assert(E->ReuseShuffleIndices.empty() &&
             "Only unique insertelements are expected.");
5342       auto *SrcVecTy = cast<FixedVectorType>(VL0->getType());
5343 
5344       unsigned const NumElts = SrcVecTy->getNumElements();
5345       unsigned const NumScalars = VL.size();
5346       APInt DemandedElts = APInt::getZero(NumElts);
5347       // TODO: Add support for Instruction::InsertValue.
5348       SmallVector<int> Mask;
5349       if (!E->ReorderIndices.empty()) {
5350         inversePermutation(E->ReorderIndices, Mask);
5351         Mask.append(NumElts - NumScalars, UndefMaskElem);
5352       } else {
5353         Mask.assign(NumElts, UndefMaskElem);
5354         std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0);
5355       }
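      // Worked example (hypothetical sizes): with NumElts = 8, NumScalars = 4
      // and no reordering, Mask starts as {0, 1, 2, 3, undef, undef, undef,
      // undef} and is remapped below using each scalar's insert index.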
5356       unsigned Offset = *getInsertIndex(VL0);
5357       bool IsIdentity = true;
5358       SmallVector<int> PrevMask(NumElts, UndefMaskElem);
5359       Mask.swap(PrevMask);
5360       for (unsigned I = 0; I < NumScalars; ++I) {
5361         unsigned InsertIdx = *getInsertIndex(VL[PrevMask[I]]);
5362         DemandedElts.setBit(InsertIdx);
5363         IsIdentity &= InsertIdx - Offset == I;
5364         Mask[InsertIdx - Offset] = I;
5365       }
5366       assert(Offset < NumElts && "Failed to find vector index offset");
5367 
5368       InstructionCost Cost = 0;
5369       Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts,
5370                                             /*Insert*/ true, /*Extract*/ false);
5371 
5372       if (IsIdentity && NumElts != NumScalars && Offset % NumScalars != 0) {
5373         // FIXME: Replace with SK_InsertSubvector once it is properly supported.
5374         unsigned Sz = PowerOf2Ceil(Offset + NumScalars);
5375         Cost += TTI->getShuffleCost(
5376             TargetTransformInfo::SK_PermuteSingleSrc,
5377             FixedVectorType::get(SrcVecTy->getElementType(), Sz));
5378       } else if (!IsIdentity) {
5379         auto *FirstInsert =
5380             cast<Instruction>(*find_if(E->Scalars, [E](Value *V) {
5381               return !is_contained(E->Scalars,
5382                                    cast<Instruction>(V)->getOperand(0));
5383             }));
5384         if (isUndefVector(FirstInsert->getOperand(0))) {
5385           Cost += TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, SrcVecTy, Mask);
5386         } else {
5387           SmallVector<int> InsertMask(NumElts);
5388           std::iota(InsertMask.begin(), InsertMask.end(), 0);
5389           for (unsigned I = 0; I < NumElts; I++) {
5390             if (Mask[I] != UndefMaskElem)
5391               InsertMask[Offset + I] = NumElts + I;
5392           }
5393           Cost +=
5394               TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVecTy, InsertMask);
5395         }
5396       }
5397 
5398       return Cost;
5399     }
5400     case Instruction::ZExt:
5401     case Instruction::SExt:
5402     case Instruction::FPToUI:
5403     case Instruction::FPToSI:
5404     case Instruction::FPExt:
5405     case Instruction::PtrToInt:
5406     case Instruction::IntToPtr:
5407     case Instruction::SIToFP:
5408     case Instruction::UIToFP:
5409     case Instruction::Trunc:
5410     case Instruction::FPTrunc:
5411     case Instruction::BitCast: {
5412       Type *SrcTy = VL0->getOperand(0)->getType();
5413       InstructionCost ScalarEltCost =
5414           TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy,
5415                                 TTI::getCastContextHint(VL0), CostKind, VL0);
5416       if (NeedToShuffleReuses) {
5417         CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
5418       }
5419 
5420       // Calculate the cost of this instruction.
5421       InstructionCost ScalarCost = VL.size() * ScalarEltCost;
5422 
5423       auto *SrcVecTy = FixedVectorType::get(SrcTy, VL.size());
5424       InstructionCost VecCost = 0;
5425       // Check if the values are candidates to demote.
5426       if (!MinBWs.count(VL0) || VecTy != SrcVecTy) {
5427         VecCost = CommonCost + TTI->getCastInstrCost(
5428                                    E->getOpcode(), VecTy, SrcVecTy,
5429                                    TTI::getCastContextHint(VL0), CostKind, VL0);
5430       }
5431       LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
5432       return VecCost - ScalarCost;
5433     }
5434     case Instruction::FCmp:
5435     case Instruction::ICmp:
5436     case Instruction::Select: {
5437       // Calculate the cost of this instruction.
5438       InstructionCost ScalarEltCost =
5439           TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, Builder.getInt1Ty(),
5440                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, VL0);
5441       if (NeedToShuffleReuses) {
5442         CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
5443       }
5444       auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size());
5445       InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
5446 
      // Check if all entries in VL are either compares, or selects with
      // compares as the condition, all sharing the same predicate.
5449       CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE;
5450       bool First = true;
5451       for (auto *V : VL) {
5452         CmpInst::Predicate CurrentPred;
5453         auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value());
5454         if ((!match(V, m_Select(MatchCmp, m_Value(), m_Value())) &&
5455              !match(V, MatchCmp)) ||
5456             (!First && VecPred != CurrentPred)) {
5457           VecPred = CmpInst::BAD_ICMP_PREDICATE;
5458           break;
5459         }
5460         First = false;
5461         VecPred = CurrentPred;
5462       }
5463 
5464       InstructionCost VecCost = TTI->getCmpSelInstrCost(
5465           E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0);
      // Check if it is possible and profitable to use min/max intrinsics for
      // the selects in VL.
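      // Illustrative example (hypothetical IR):
      //   %c = icmp slt i32 %a, %b
      //   %s = select i1 %c, i32 %a, i32 %b
      // is equivalent to llvm.smin.i32(%a, %b), which may be cheaper as a
      // vector intrinsic than the vector compare plus select.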
5469       auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL);
5470       if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) {
5471         IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy,
5472                                           {VecTy, VecTy});
5473         InstructionCost IntrinsicCost =
5474             TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
5475         // If the selects are the only uses of the compares, they will be dead
5476         // and we can adjust the cost by removing their cost.
5477         if (IntrinsicAndUse.second)
5478           IntrinsicCost -= TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy,
5479                                                    MaskTy, VecPred, CostKind);
5480         VecCost = std::min(VecCost, IntrinsicCost);
5481       }
5482       LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
5483       return CommonCost + VecCost - ScalarCost;
5484     }
5485     case Instruction::FNeg:
5486     case Instruction::Add:
5487     case Instruction::FAdd:
5488     case Instruction::Sub:
5489     case Instruction::FSub:
5490     case Instruction::Mul:
5491     case Instruction::FMul:
5492     case Instruction::UDiv:
5493     case Instruction::SDiv:
5494     case Instruction::FDiv:
5495     case Instruction::URem:
5496     case Instruction::SRem:
5497     case Instruction::FRem:
5498     case Instruction::Shl:
5499     case Instruction::LShr:
5500     case Instruction::AShr:
5501     case Instruction::And:
5502     case Instruction::Or:
5503     case Instruction::Xor: {
5504       // Certain instructions can be cheaper to vectorize if they have a
5505       // constant second vector operand.
5506       TargetTransformInfo::OperandValueKind Op1VK =
5507           TargetTransformInfo::OK_AnyValue;
5508       TargetTransformInfo::OperandValueKind Op2VK =
5509           TargetTransformInfo::OK_UniformConstantValue;
5510       TargetTransformInfo::OperandValueProperties Op1VP =
5511           TargetTransformInfo::OP_None;
5512       TargetTransformInfo::OperandValueProperties Op2VP =
5513           TargetTransformInfo::OP_PowerOf2;
5514 
5515       // If all operands are exactly the same ConstantInt then set the
5516       // operand kind to OK_UniformConstantValue.
5517       // If instead not all operands are constants, then set the operand kind
5518       // to OK_AnyValue. If all operands are constants but not the same,
5519       // then set the operand kind to OK_NonUniformConstantValue.
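      // Worked example (hypothetical operands): second operands {4, 4, 4, 4}
      // give OK_UniformConstantValue with OP_PowerOf2; {4, 8, 4, 8} gives
      // OK_NonUniformConstantValue; {4, %x} falls back to OK_AnyValue.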
5520       ConstantInt *CInt0 = nullptr;
5521       for (unsigned i = 0, e = VL.size(); i < e; ++i) {
5522         const Instruction *I = cast<Instruction>(VL[i]);
5523         unsigned OpIdx = isa<BinaryOperator>(I) ? 1 : 0;
5524         ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx));
5525         if (!CInt) {
5526           Op2VK = TargetTransformInfo::OK_AnyValue;
5527           Op2VP = TargetTransformInfo::OP_None;
5528           break;
5529         }
5530         if (Op2VP == TargetTransformInfo::OP_PowerOf2 &&
5531             !CInt->getValue().isPowerOf2())
5532           Op2VP = TargetTransformInfo::OP_None;
5533         if (i == 0) {
5534           CInt0 = CInt;
5535           continue;
5536         }
5537         if (CInt0 != CInt)
5538           Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
5539       }
5540 
5541       SmallVector<const Value *, 4> Operands(VL0->operand_values());
5542       InstructionCost ScalarEltCost =
5543           TTI->getArithmeticInstrCost(E->getOpcode(), ScalarTy, CostKind, Op1VK,
5544                                       Op2VK, Op1VP, Op2VP, Operands, VL0);
5545       if (NeedToShuffleReuses) {
5546         CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
5547       }
5548       InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
5549       InstructionCost VecCost =
5550           TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind, Op1VK,
5551                                       Op2VK, Op1VP, Op2VP, Operands, VL0);
5552       LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
5553       return CommonCost + VecCost - ScalarCost;
5554     }
5555     case Instruction::GetElementPtr: {
5556       TargetTransformInfo::OperandValueKind Op1VK =
5557           TargetTransformInfo::OK_AnyValue;
5558       TargetTransformInfo::OperandValueKind Op2VK =
5559           TargetTransformInfo::OK_UniformConstantValue;
5560 
5561       InstructionCost ScalarEltCost = TTI->getArithmeticInstrCost(
5562           Instruction::Add, ScalarTy, CostKind, Op1VK, Op2VK);
5563       if (NeedToShuffleReuses) {
5564         CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
5565       }
5566       InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
5567       InstructionCost VecCost = TTI->getArithmeticInstrCost(
5568           Instruction::Add, VecTy, CostKind, Op1VK, Op2VK);
5569       LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
5570       return CommonCost + VecCost - ScalarCost;
5571     }
5572     case Instruction::Load: {
5573       // Cost of wide load - cost of scalar loads.
5574       Align Alignment = cast<LoadInst>(VL0)->getAlign();
5575       InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
5576           Instruction::Load, ScalarTy, Alignment, 0, CostKind, VL0);
5577       if (NeedToShuffleReuses) {
5578         CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
5579       }
5580       InstructionCost ScalarLdCost = VecTy->getNumElements() * ScalarEltCost;
5581       InstructionCost VecLdCost;
5582       if (E->State == TreeEntry::Vectorize) {
5583         VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, Alignment, 0,
5584                                          CostKind, VL0);
5585       } else {
5586         assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState");
5587         Align CommonAlignment = Alignment;
5588         for (Value *V : VL)
5589           CommonAlignment =
5590               commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign());
5591         VecLdCost = TTI->getGatherScatterOpCost(
5592             Instruction::Load, VecTy, cast<LoadInst>(VL0)->getPointerOperand(),
5593             /*VariableMask=*/false, CommonAlignment, CostKind, VL0);
5594       }
5595       LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecLdCost, ScalarLdCost));
5596       return CommonCost + VecLdCost - ScalarLdCost;
5597     }
5598     case Instruction::Store: {
5599       // We know that we can merge the stores. Calculate the cost.
5600       bool IsReorder = !E->ReorderIndices.empty();
5601       auto *SI =
5602           cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0);
5603       Align Alignment = SI->getAlign();
5604       InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
5605           Instruction::Store, ScalarTy, Alignment, 0, CostKind, VL0);
5606       InstructionCost ScalarStCost = VecTy->getNumElements() * ScalarEltCost;
5607       InstructionCost VecStCost = TTI->getMemoryOpCost(
5608           Instruction::Store, VecTy, Alignment, 0, CostKind, VL0);
5609       LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecStCost, ScalarStCost));
5610       return CommonCost + VecStCost - ScalarStCost;
5611     }
5612     case Instruction::Call: {
5613       CallInst *CI = cast<CallInst>(VL0);
5614       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
5615 
5616       // Calculate the cost of the scalar and vector calls.
5617       IntrinsicCostAttributes CostAttrs(ID, *CI, 1);
5618       InstructionCost ScalarEltCost =
5619           TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
5620       if (NeedToShuffleReuses) {
5621         CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
5622       }
5623       InstructionCost ScalarCallCost = VecTy->getNumElements() * ScalarEltCost;
5624 
5625       auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
5626       InstructionCost VecCallCost =
5627           std::min(VecCallCosts.first, VecCallCosts.second);
5628 
5629       LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
5630                         << " (" << VecCallCost << "-" << ScalarCallCost << ")"
5631                         << " for " << *CI << "\n");
5632 
5633       return CommonCost + VecCallCost - ScalarCallCost;
5634     }
5635     case Instruction::ShuffleVector: {
5636       assert(E->isAltShuffle() &&
5637              ((Instruction::isBinaryOp(E->getOpcode()) &&
5638                Instruction::isBinaryOp(E->getAltOpcode())) ||
5639               (Instruction::isCast(E->getOpcode()) &&
5640                Instruction::isCast(E->getAltOpcode())) ||
5641               (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) &&
5642              "Invalid Shuffle Vector Operand");
5643       InstructionCost ScalarCost = 0;
5644       if (NeedToShuffleReuses) {
5645         for (unsigned Idx : E->ReuseShuffleIndices) {
5646           Instruction *I = cast<Instruction>(VL[Idx]);
5647           CommonCost -= TTI->getInstructionCost(I, CostKind);
5648         }
5649         for (Value *V : VL) {
5650           Instruction *I = cast<Instruction>(V);
5651           CommonCost += TTI->getInstructionCost(I, CostKind);
5652         }
5653       }
5654       for (Value *V : VL) {
5655         Instruction *I = cast<Instruction>(V);
5656         assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
5657         ScalarCost += TTI->getInstructionCost(I, CostKind);
5658       }
5659       // VecCost is equal to sum of the cost of creating 2 vectors
5660       // and the cost of creating shuffle.
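      // Illustrative example (hypothetical ops): for alternating add/sub the
      // vector cost is one vector add, one vector sub, and an SK_Select
      // shuffle such as <0, 5, 2, 7> that blends the two results.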
5661       InstructionCost VecCost = 0;
5662       // Try to find the previous shuffle node with the same operands and same
5663       // main/alternate ops.
5664       auto &&TryFindNodeWithEqualOperands = [this, E]() {
5665         for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
5666           if (TE.get() == E)
5667             break;
5668           if (TE->isAltShuffle() &&
5669               ((TE->getOpcode() == E->getOpcode() &&
5670                 TE->getAltOpcode() == E->getAltOpcode()) ||
5671                (TE->getOpcode() == E->getAltOpcode() &&
5672                 TE->getAltOpcode() == E->getOpcode())) &&
5673               TE->hasEqualOperands(*E))
5674             return true;
5675         }
5676         return false;
5677       };
5678       if (TryFindNodeWithEqualOperands()) {
5679         LLVM_DEBUG({
5680           dbgs() << "SLP: diamond match for alternate node found.\n";
5681           E->dump();
5682         });
        // No need to add new vector costs here since we're going to reuse the
        // same main/alternate vector ops, just with different shuffling.
5685       } else if (Instruction::isBinaryOp(E->getOpcode())) {
5686         VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind);
5687         VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy,
5688                                                CostKind);
5689       } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) {
5690         VecCost = TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy,
5691                                           Builder.getInt1Ty(),
5692                                           CI0->getPredicate(), CostKind, VL0);
5693         VecCost += TTI->getCmpSelInstrCost(
5694             E->getOpcode(), ScalarTy, Builder.getInt1Ty(),
5695             cast<CmpInst>(E->getAltOp())->getPredicate(), CostKind,
5696             E->getAltOp());
5697       } else {
5698         Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType();
5699         Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType();
5700         auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size());
5701         auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size());
5702         VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty,
5703                                         TTI::CastContextHint::None, CostKind);
5704         VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty,
5705                                          TTI::CastContextHint::None, CostKind);
5706       }
5707 
5708       SmallVector<int> Mask;
5709       buildShuffleEntryMask(
5710           E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices,
5711           [E](Instruction *I) {
5712             assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
5713             return isAlternateInstruction(I, E->getMainOp(), E->getAltOp());
5714           },
5715           Mask);
5716       CommonCost =
5717           TTI->getShuffleCost(TargetTransformInfo::SK_Select, FinalVecTy, Mask);
5718       LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
5719       return CommonCost + VecCost - ScalarCost;
5720     }
5721     default:
5722       llvm_unreachable("Unknown instruction");
5723   }
5724 }
5725 
5726 bool BoUpSLP::isFullyVectorizableTinyTree(bool ForReduction) const {
5727   LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
                    << VectorizableTree.size() << " is fully vectorizable.\n");
5729 
5730   auto &&AreVectorizableGathers = [this](const TreeEntry *TE, unsigned Limit) {
5731     SmallVector<int> Mask;
5732     return TE->State == TreeEntry::NeedToGather &&
5733            !any_of(TE->Scalars,
5734                    [this](Value *V) { return EphValues.contains(V); }) &&
5735            (allConstant(TE->Scalars) || isSplat(TE->Scalars) ||
5736             TE->Scalars.size() < Limit ||
5737             ((TE->getOpcode() == Instruction::ExtractElement ||
5738               all_of(TE->Scalars,
5739                      [](Value *V) {
5740                        return isa<ExtractElementInst, UndefValue>(V);
5741                      })) &&
5742              isFixedVectorShuffle(TE->Scalars, Mask)) ||
5743             (TE->State == TreeEntry::NeedToGather &&
5744              TE->getOpcode() == Instruction::Load && !TE->isAltShuffle()));
5745   };
5746 
5747   // We only handle trees of heights 1 and 2.
5748   if (VectorizableTree.size() == 1 &&
5749       (VectorizableTree[0]->State == TreeEntry::Vectorize ||
5750        (ForReduction &&
5751         AreVectorizableGathers(VectorizableTree[0].get(),
5752                                VectorizableTree[0]->Scalars.size()) &&
5753         VectorizableTree[0]->getVectorFactor() > 2)))
5754     return true;
5755 
5756   if (VectorizableTree.size() != 2)
5757     return false;
5758 
  // Handle splat and all-constant stores. Also try to vectorize tiny trees
  // with second gather nodes if they have fewer scalar operands than the
  // initial tree element (it may be profitable to shuffle the second gather),
  // or if they are extractelements that form a shuffle.
5763   SmallVector<int> Mask;
5764   if (VectorizableTree[0]->State == TreeEntry::Vectorize &&
5765       AreVectorizableGathers(VectorizableTree[1].get(),
5766                              VectorizableTree[0]->Scalars.size()))
5767     return true;
5768 
5769   // Gathering cost would be too much for tiny trees.
5770   if (VectorizableTree[0]->State == TreeEntry::NeedToGather ||
5771       (VectorizableTree[1]->State == TreeEntry::NeedToGather &&
5772        VectorizableTree[0]->State != TreeEntry::ScatterVectorize))
5773     return false;
5774 
5775   return true;
5776 }
5777 
5778 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts,
5779                                        TargetTransformInfo *TTI,
5780                                        bool MustMatchOrInst) {
5781   // Look past the root to find a source value. Arbitrarily follow the
5782   // path through operand 0 of any 'or'. Also, peek through optional
5783   // shift-left-by-multiple-of-8-bits.
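  // Illustrative shape of a matching pattern (hypothetical IR):
  //   %z = zext i8 %b to i32   ; %b is a load
  //   %s = shl i32 %z, 8
  //   %r = or i32 %s, %lo
  // i.e. following operand 0 through the or/shl chain eventually reaches a
  // zext'ed load, with every shift amount a multiple of 8 bits.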
5784   Value *ZextLoad = Root;
5785   const APInt *ShAmtC;
5786   bool FoundOr = false;
5787   while (!isa<ConstantExpr>(ZextLoad) &&
5788          (match(ZextLoad, m_Or(m_Value(), m_Value())) ||
5789           (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) &&
5790            ShAmtC->urem(8) == 0))) {
5791     auto *BinOp = cast<BinaryOperator>(ZextLoad);
5792     ZextLoad = BinOp->getOperand(0);
5793     if (BinOp->getOpcode() == Instruction::Or)
5794       FoundOr = true;
5795   }
5796   // Check if the input is an extended load of the required or/shift expression.
5797   Value *Load;
5798   if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root ||
5799       !match(ZextLoad, m_ZExt(m_Value(Load))) || !isa<LoadInst>(Load))
5800     return false;
5801 
5802   // Require that the total load bit width is a legal integer type.
5803   // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target.
5804   // But <16 x i8> --> i128 is not, so the backend probably can't reduce it.
5805   Type *SrcTy = Load->getType();
5806   unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts;
5807   if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth)))
5808     return false;
5809 
5810   // Everything matched - assume that we can fold the whole sequence using
5811   // load combining.
5812   LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at "
5813              << *(cast<Instruction>(Root)) << "\n");
5814 
5815   return true;
5816 }
5817 
5818 bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const {
5819   if (RdxKind != RecurKind::Or)
5820     return false;
5821 
5822   unsigned NumElts = VectorizableTree[0]->Scalars.size();
5823   Value *FirstReduced = VectorizableTree[0]->Scalars[0];
5824   return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI,
                                    /*MustMatchOrInst=*/false);
5826 }
5827 
5828 bool BoUpSLP::isLoadCombineCandidate() const {
5829   // Peek through a final sequence of stores and check if all operations are
5830   // likely to be load-combined.
5831   unsigned NumElts = VectorizableTree[0]->Scalars.size();
5832   for (Value *Scalar : VectorizableTree[0]->Scalars) {
5833     Value *X;
5834     if (!match(Scalar, m_Store(m_Value(X), m_Value())) ||
        !isLoadCombineCandidateImpl(X, NumElts, TTI, /*MustMatchOrInst=*/true))
5836       return false;
5837   }
5838   return true;
5839 }
5840 
5841 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const {
5842   // No need to vectorize inserts of gathered values.
5843   if (VectorizableTree.size() == 2 &&
5844       isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) &&
5845       VectorizableTree[1]->State == TreeEntry::NeedToGather)
5846     return true;
5847 
5848   // We can vectorize the tree if its size is greater than or equal to the
5849   // minimum size specified by the MinTreeSize command line option.
5850   if (VectorizableTree.size() >= MinTreeSize)
5851     return false;
5852 
5853   // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
5854   // can vectorize it if we can prove it fully vectorizable.
5855   if (isFullyVectorizableTinyTree(ForReduction))
5856     return false;
5857 
  assert((!VectorizableTree.empty() || ExternalUses.empty()) &&
         "We shouldn't have any external users");
5861 
5862   // Otherwise, we can't vectorize the tree. It is both tiny and not fully
5863   // vectorizable.
5864   return true;
5865 }
5866 
5867 InstructionCost BoUpSLP::getSpillCost() const {
5868   // Walk from the bottom of the tree to the top, tracking which values are
5869   // live. When we see a call instruction that is not part of our tree,
5870   // query TTI to see if there is a cost to keeping values live over it
5871   // (for example, if spills and fills are required).
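  // Illustrative example (hypothetical numbers): if two tree values are live
  // across one call and getCostOfKeepingLiveOverCall returns 2 for their
  // vector types, 1 * 2 = 2 is added to the total tree cost below.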
5872   unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
5873   InstructionCost Cost = 0;
5874 
5875   SmallPtrSet<Instruction*, 4> LiveValues;
5876   Instruction *PrevInst = nullptr;
5877 
5878   // The entries in VectorizableTree are not necessarily ordered by their
5879   // position in basic blocks. Collect them and order them by dominance so later
5880   // instructions are guaranteed to be visited first. For instructions in
5881   // different basic blocks, we only scan to the beginning of the block, so
5882   // their order does not matter, as long as all instructions in a basic block
5883   // are grouped together. Using dominance ensures a deterministic order.
5884   SmallVector<Instruction *, 16> OrderedScalars;
5885   for (const auto &TEPtr : VectorizableTree) {
5886     Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]);
5887     if (!Inst)
5888       continue;
5889     OrderedScalars.push_back(Inst);
5890   }
5891   llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) {
5892     auto *NodeA = DT->getNode(A->getParent());
5893     auto *NodeB = DT->getNode(B->getParent());
5894     assert(NodeA && "Should only process reachable instructions");
5895     assert(NodeB && "Should only process reachable instructions");
5896     assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
5897            "Different nodes should have different DFS numbers");
5898     if (NodeA != NodeB)
5899       return NodeA->getDFSNumIn() < NodeB->getDFSNumIn();
5900     return B->comesBefore(A);
5901   });
5902 
5903   for (Instruction *Inst : OrderedScalars) {
5904     if (!PrevInst) {
5905       PrevInst = Inst;
5906       continue;
5907     }
5908 
5909     // Update LiveValues.
5910     LiveValues.erase(PrevInst);
5911     for (auto &J : PrevInst->operands()) {
5912       if (isa<Instruction>(&*J) && getTreeEntry(&*J))
5913         LiveValues.insert(cast<Instruction>(&*J));
5914     }
5915 
5916     LLVM_DEBUG({
5917       dbgs() << "SLP: #LV: " << LiveValues.size();
5918       for (auto *X : LiveValues)
5919         dbgs() << " " << X->getName();
5920       dbgs() << ", Looking at ";
5921       Inst->dump();
5922     });
5923 
5924     // Now find the sequence of instructions between PrevInst and Inst.
5925     unsigned NumCalls = 0;
5926     BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
5927                                  PrevInstIt =
5928                                      PrevInst->getIterator().getReverse();
5929     while (InstIt != PrevInstIt) {
5930       if (PrevInstIt == PrevInst->getParent()->rend()) {
5931         PrevInstIt = Inst->getParent()->rbegin();
5932         continue;
5933       }
5934 
5935       // Debug information does not impact spill cost.
5936       if ((isa<CallInst>(&*PrevInstIt) &&
5937            !isa<DbgInfoIntrinsic>(&*PrevInstIt)) &&
5938           &*PrevInstIt != PrevInst)
5939         NumCalls++;
5940 
5941       ++PrevInstIt;
5942     }
5943 
5944     if (NumCalls) {
5945       SmallVector<Type*, 4> V;
5946       for (auto *II : LiveValues) {
5947         auto *ScalarTy = II->getType();
5948         if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy))
5949           ScalarTy = VectorTy->getElementType();
5950         V.push_back(FixedVectorType::get(ScalarTy, BundleWidth));
5951       }
5952       Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
5953     }
5954 
5955     PrevInst = Inst;
5956   }
5957 
5958   return Cost;
5959 }
5960 
5961 /// Check if two insertelement instructions are from the same buildvector.
5962 static bool areTwoInsertFromSameBuildVector(InsertElementInst *VU,
5963                                             InsertElementInst *V) {
  // Instructions must be from the same basic block.
  if (VU->getParent() != V->getParent())
    return false;
  // Inserts into different vector types cannot be from the same buildvector.
  if (VU->getType() != V->getType())
    return false;
  // Inserts with multiple uses are treated as separate nodes.
  if (!VU->hasOneUse() && !V->hasOneUse())
5972     return false;
5973   auto *IE1 = VU;
5974   auto *IE2 = V;
  // Walk the vector operands of the insertelement instructions, trying to
  // find either VU as the original vector of IE2 or V as the original vector
  // of IE1.
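  // Illustrative example (hypothetical IR):
  //   %i0 = insertelement <4 x i32> undef, i32 %a, i32 0
  //   %i1 = insertelement <4 x i32> %i0,   i32 %b, i32 1
  // Walking the vector operands from %i1 reaches %i0, so the two inserts
  // belong to the same buildvector sequence.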
5978   do {
5979     if (IE2 == VU || IE1 == V)
5980       return true;
5981     if (IE1) {
5982       if (IE1 != VU && !IE1->hasOneUse())
5983         IE1 = nullptr;
5984       else
5985         IE1 = dyn_cast<InsertElementInst>(IE1->getOperand(0));
5986     }
5987     if (IE2) {
5988       if (IE2 != V && !IE2->hasOneUse())
5989         IE2 = nullptr;
5990       else
5991         IE2 = dyn_cast<InsertElementInst>(IE2->getOperand(0));
5992     }
5993   } while (IE1 || IE2);
5994   return false;
5995 }
5996 
5997 InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
5998   InstructionCost Cost = 0;
5999   LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
6000                     << VectorizableTree.size() << ".\n");
6001 
6002   unsigned BundleWidth = VectorizableTree[0]->Scalars.size();
6003 
6004   for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
6005     TreeEntry &TE = *VectorizableTree[I].get();
6006 
6007     InstructionCost C = getEntryCost(&TE, VectorizedVals);
6008     Cost += C;
6009     LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
6010                       << " for bundle that starts with " << *TE.Scalars[0]
6011                       << ".\n"
6012                       << "SLP: Current total cost = " << Cost << "\n");
6013   }
6014 
6015   SmallPtrSet<Value *, 16> ExtractCostCalculated;
6016   InstructionCost ExtractCost = 0;
6017   SmallVector<unsigned> VF;
6018   SmallVector<SmallVector<int>> ShuffleMask;
6019   SmallVector<Value *> FirstUsers;
6020   SmallVector<APInt> DemandedElts;
6021   for (ExternalUser &EU : ExternalUses) {
6022     // We only add extract cost once for the same scalar.
6023     if (!isa_and_nonnull<InsertElementInst>(EU.User) &&
6024         !ExtractCostCalculated.insert(EU.Scalar).second)
6025       continue;
6026 
6027     // Uses by ephemeral values are free (because the ephemeral value will be
6028     // removed prior to code generation, and so the extraction will be
6029     // removed as well).
6030     if (EphValues.count(EU.User))
6031       continue;
6032 
    // No extract cost for a vector "scalar".
6034     if (isa<FixedVectorType>(EU.Scalar->getType()))
6035       continue;
6036 
    // The cost for external uses was already counted when we tried to adjust
    // the cost for extractelements; no need to add it again.
6039     if (isa<ExtractElementInst>(EU.Scalar))
6040       continue;
6041 
    // If the found user is an insertelement, do not calculate the extract
    // cost but try to detect it as a final shuffled/identity match.
6044     if (auto *VU = dyn_cast_or_null<InsertElementInst>(EU.User)) {
6045       if (auto *FTy = dyn_cast<FixedVectorType>(VU->getType())) {
6046         Optional<unsigned> InsertIdx = getInsertIndex(VU);
6047         if (InsertIdx) {
6048           auto *It = find_if(FirstUsers, [VU](Value *V) {
6049             return areTwoInsertFromSameBuildVector(VU,
6050                                                    cast<InsertElementInst>(V));
6051           });
6052           int VecId = -1;
6053           if (It == FirstUsers.end()) {
6054             VF.push_back(FTy->getNumElements());
6055             ShuffleMask.emplace_back(VF.back(), UndefMaskElem);
            // Find the insertelement chain vectorized in the tree, if any.
6057             Value *Base = VU;
6058             while (isa<InsertElementInst>(Base)) {
6059               // Build the mask for the vectorized insertelement instructions.
6060               if (const TreeEntry *E = getTreeEntry(Base)) {
6061                 VU = cast<InsertElementInst>(Base);
6062                 do {
6063                   int Idx = E->findLaneForValue(Base);
6064                   ShuffleMask.back()[Idx] = Idx;
6065                   Base = cast<InsertElementInst>(Base)->getOperand(0);
6066                 } while (E == getTreeEntry(Base));
6067                 break;
6068               }
6069               Base = cast<InsertElementInst>(Base)->getOperand(0);
6070             }
6071             FirstUsers.push_back(VU);
6072             DemandedElts.push_back(APInt::getZero(VF.back()));
6073             VecId = FirstUsers.size() - 1;
6074           } else {
6075             VecId = std::distance(FirstUsers.begin(), It);
6076           }
6077           ShuffleMask[VecId][*InsertIdx] = EU.Lane;
6078           DemandedElts[VecId].setBit(*InsertIdx);
6079           continue;
6080         }
6081       }
6082     }
6083 
6084     // If we plan to rewrite the tree in a smaller type, we will need to sign
6085     // extend the extracted value back to the original type. Here, we account
6086     // for the extract and the added cost of the sign extend if needed.
6087     auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth);
6088     auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
6089     if (MinBWs.count(ScalarRoot)) {
6090       auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
6091       auto Extend =
6092           MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
6093       VecTy = FixedVectorType::get(MinTy, BundleWidth);
6094       ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
6095                                                    VecTy, EU.Lane);
6096     } else {
6097       ExtractCost +=
6098           TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
6099     }
6100   }
6101 
6102   InstructionCost SpillCost = getSpillCost();
6103   Cost += SpillCost + ExtractCost;
6104   if (FirstUsers.size() == 1) {
6105     int Limit = ShuffleMask.front().size() * 2;
6106     if (all_of(ShuffleMask.front(), [Limit](int Idx) { return Idx < Limit; }) &&
6107         !ShuffleVectorInst::isIdentityMask(ShuffleMask.front())) {
6108       InstructionCost C = TTI->getShuffleCost(
6109           TTI::SK_PermuteSingleSrc,
6110           cast<FixedVectorType>(FirstUsers.front()->getType()),
6111           ShuffleMask.front());
6112       LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
6113                         << " for final shuffle of insertelement external users "
6114                         << *VectorizableTree.front()->Scalars.front() << ".\n"
6115                         << "SLP: Current total cost = " << Cost << "\n");
6116       Cost += C;
6117     }
6118     InstructionCost InsertCost = TTI->getScalarizationOverhead(
6119         cast<FixedVectorType>(FirstUsers.front()->getType()),
6120         DemandedElts.front(), /*Insert*/ true, /*Extract*/ false);
6121     LLVM_DEBUG(dbgs() << "SLP: subtracting the cost " << InsertCost
6122                       << " for insertelements gather.\n"
6123                       << "SLP: Current total cost = " << Cost << "\n");
6124     Cost -= InsertCost;
6125   } else if (FirstUsers.size() >= 2) {
6126     unsigned MaxVF = *std::max_element(VF.begin(), VF.end());
6127     // Combined masks of the first 2 vectors.
6128     SmallVector<int> CombinedMask(MaxVF, UndefMaskElem);
6129     copy(ShuffleMask.front(), CombinedMask.begin());
6130     APInt CombinedDemandedElts = DemandedElts.front().zextOrSelf(MaxVF);
6131     auto *VecTy = FixedVectorType::get(
6132         cast<VectorType>(FirstUsers.front()->getType())->getElementType(),
6133         MaxVF);
6134     for (int I = 0, E = ShuffleMask[1].size(); I < E; ++I) {
6135       if (ShuffleMask[1][I] != UndefMaskElem) {
6136         CombinedMask[I] = ShuffleMask[1][I] + MaxVF;
6137         CombinedDemandedElts.setBit(I);
6138       }
6139     }
6140     InstructionCost C =
6141         TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, VecTy, CombinedMask);
6142     LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
6143                       << " for final shuffle of vector node and external "
6144                          "insertelement users "
6145                       << *VectorizableTree.front()->Scalars.front() << ".\n"
6146                       << "SLP: Current total cost = " << Cost << "\n");
6147     Cost += C;
6148     InstructionCost InsertCost = TTI->getScalarizationOverhead(
6149         VecTy, CombinedDemandedElts, /*Insert*/ true, /*Extract*/ false);
6150     LLVM_DEBUG(dbgs() << "SLP: subtracting the cost " << InsertCost
6151                       << " for insertelements gather.\n"
6152                       << "SLP: Current total cost = " << Cost << "\n");
6153     Cost -= InsertCost;
6154     for (int I = 2, E = FirstUsers.size(); I < E; ++I) {
6155       // Other elements - permutation of 2 vectors (the initial one and the
6156       // next Ith incoming vector).
6157       unsigned VF = ShuffleMask[I].size();
6158       for (unsigned Idx = 0; Idx < VF; ++Idx) {
6159         int Mask = ShuffleMask[I][Idx];
6160         if (Mask != UndefMaskElem)
6161           CombinedMask[Idx] = MaxVF + Mask;
6162         else if (CombinedMask[Idx] != UndefMaskElem)
6163           CombinedMask[Idx] = Idx;
6164       }
6165       for (unsigned Idx = VF; Idx < MaxVF; ++Idx)
6166         if (CombinedMask[Idx] != UndefMaskElem)
6167           CombinedMask[Idx] = Idx;
6168       InstructionCost C =
6169           TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, VecTy, CombinedMask);
6170       LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
6171                         << " for final shuffle of vector node and external "
6172                            "insertelement users "
6173                         << *VectorizableTree.front()->Scalars.front() << ".\n"
6174                         << "SLP: Current total cost = " << Cost << "\n");
6175       Cost += C;
6176       InstructionCost InsertCost = TTI->getScalarizationOverhead(
6177           cast<FixedVectorType>(FirstUsers[I]->getType()), DemandedElts[I],
6178           /*Insert*/ true, /*Extract*/ false);
6179       LLVM_DEBUG(dbgs() << "SLP: subtracting the cost " << InsertCost
6180                         << " for insertelements gather.\n"
6181                         << "SLP: Current total cost = " << Cost << "\n");
6182       Cost -= InsertCost;
6183     }
6184   }
6185 
6186 #ifndef NDEBUG
6187   SmallString<256> Str;
6188   {
6189     raw_svector_ostream OS(Str);
6190     OS << "SLP: Spill Cost = " << SpillCost << ".\n"
6191        << "SLP: Extract Cost = " << ExtractCost << ".\n"
6192        << "SLP: Total Cost = " << Cost << ".\n";
6193   }
6194   LLVM_DEBUG(dbgs() << Str);
6195   if (ViewSLPTree)
6196     ViewGraph(this, "SLP" + F->getName(), false, Str);
6197 #endif
6198 
6199   return Cost;
6200 }
6201 
6202 Optional<TargetTransformInfo::ShuffleKind>
6203 BoUpSLP::isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
6204                                SmallVectorImpl<const TreeEntry *> &Entries) {
6205   // TODO: currently checking only for Scalars in the tree entry, need to count
6206   // reused elements too for better cost estimation.
6207   Mask.assign(TE->Scalars.size(), UndefMaskElem);
6208   Entries.clear();
  // Build a map from values to the gather tree entries containing them.
6210   DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>> ValueToTEs;
6211   for (const std::unique_ptr<TreeEntry> &EntryPtr : VectorizableTree) {
6212     if (EntryPtr.get() == TE)
6213       break;
6214     if (EntryPtr->State != TreeEntry::NeedToGather)
6215       continue;
6216     for (Value *V : EntryPtr->Scalars)
6217       ValueToTEs.try_emplace(V).first->getSecond().insert(EntryPtr.get());
6218   }
  // Find all tree entries used by the gathered values. If no common entries
  // are found, this is not a shuffle.
  // Here we build a set of tree nodes for each gathered value and try to find
  // the intersection between these sets. If we have at least one common tree
  // node for each gathered value, we have just a permutation of a single
  // vector. If we end up with 2 different sets, we have a permutation of 2
  // input vectors.
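  // Worked example (hypothetical entries): for gathered scalars
  // {a0, b0, a1, b1}, where {a0, a1} come from one tree entry and {b0, b1}
  // from another, two candidate sets remain, so the gather becomes a
  // two-source permutation of those entries' vectors.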
6226   SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs;
6227   DenseMap<Value *, int> UsedValuesEntry;
6228   for (Value *V : TE->Scalars) {
6229     if (isa<UndefValue>(V))
6230       continue;
6231     // Build a list of tree entries where V is used.
6232     SmallPtrSet<const TreeEntry *, 4> VToTEs;
6233     auto It = ValueToTEs.find(V);
6234     if (It != ValueToTEs.end())
6235       VToTEs = It->second;
6236     if (const TreeEntry *VTE = getTreeEntry(V))
6237       VToTEs.insert(VTE);
6238     if (VToTEs.empty())
6239       return None;
6240     if (UsedTEs.empty()) {
      // On the first iteration, just insert the list of nodes into the vector.
6242       UsedTEs.push_back(VToTEs);
6243     } else {
      // Need to check if there are any previously used tree nodes which use V.
      // If there are no such nodes, consider that we have another input
      // vector.
6247       SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs);
6248       unsigned Idx = 0;
6249       for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) {
6250         // Do we have a non-empty intersection of previously listed tree entries
6251         // and tree entries using current V?
6252         set_intersect(VToTEs, Set);
6253         if (!VToTEs.empty()) {
6254           // Yes, write the new subset and continue analysis for the next
6255           // scalar.
6256           Set.swap(VToTEs);
6257           break;
6258         }
6259         VToTEs = SavedVToTEs;
6260         ++Idx;
6261       }
6262       // No non-empty intersection found - need to add a second set of possible
6263       // source vectors.
6264       if (Idx == UsedTEs.size()) {
        // If the number of input vectors is greater than 2, this is not a
        // permutation; fall back to the regular gather.
6267         if (UsedTEs.size() == 2)
6268           return None;
6269         UsedTEs.push_back(SavedVToTEs);
6270         Idx = UsedTEs.size() - 1;
6271       }
6272       UsedValuesEntry.try_emplace(V, Idx);
6273     }
6274   }
6275 
6276   unsigned VF = 0;
6277   if (UsedTEs.size() == 1) {
    // First, try to find a perfect match in another gather node.
6279     auto It = find_if(UsedTEs.front(), [TE](const TreeEntry *EntryPtr) {
6280       return EntryPtr->isSame(TE->Scalars);
6281     });
6282     if (It != UsedTEs.front().end()) {
6283       Entries.push_back(*It);
6284       std::iota(Mask.begin(), Mask.end(), 0);
6285       return TargetTransformInfo::SK_PermuteSingleSrc;
6286     }
6287     // No perfect match, just shuffle, so choose the first tree node.
6288     Entries.push_back(*UsedTEs.front().begin());
6289   } else {
6290     // Try to find nodes with the same vector factor.
    assert(UsedTEs.size() == 2 && "Expected at most 2 permuted entries.");
6292     DenseMap<int, const TreeEntry *> VFToTE;
6293     for (const TreeEntry *TE : UsedTEs.front())
6294       VFToTE.try_emplace(TE->getVectorFactor(), TE);
6295     for (const TreeEntry *TE : UsedTEs.back()) {
6296       auto It = VFToTE.find(TE->getVectorFactor());
6297       if (It != VFToTE.end()) {
6298         VF = It->first;
6299         Entries.push_back(It->second);
6300         Entries.push_back(TE);
6301         break;
6302       }
6303     }
    // No two source vectors with the same vector factor - give up and do a
    // regular gather.
6306     if (Entries.empty())
6307       return None;
6308   }
6309 
6310   // Build a shuffle mask for better cost estimation and vector emission.
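  // Mask elements are encoded as Idx * VF + Lane: lanes taken from the second
  // source entry are offset by VF, matching shufflevector mask semantics
  // (e.g. with VF == 4, lane 2 of the second entry becomes mask element 6).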
6311   for (int I = 0, E = TE->Scalars.size(); I < E; ++I) {
6312     Value *V = TE->Scalars[I];
6313     if (isa<UndefValue>(V))
6314       continue;
6315     unsigned Idx = UsedValuesEntry.lookup(V);
6316     const TreeEntry *VTE = Entries[Idx];
6317     int FoundLane = VTE->findLaneForValue(V);
6318     Mask[I] = Idx * VF + FoundLane;
6319     // Extra check required by isSingleSourceMaskImpl function (called by
6320     // ShuffleVectorInst::isSingleSourceMask).
6321     if (Mask[I] >= 2 * E)
6322       return None;
6323   }
6324   switch (Entries.size()) {
6325   case 1:
6326     return TargetTransformInfo::SK_PermuteSingleSrc;
6327   case 2:
6328     return TargetTransformInfo::SK_PermuteTwoSrc;
6329   default:
6330     break;
6331   }
6332   return None;
6333 }
6334 
6335 InstructionCost BoUpSLP::getGatherCost(FixedVectorType *Ty,
6336                                        const APInt &ShuffledIndices,
6337                                        bool NeedToShuffle) const {
6338   InstructionCost Cost =
6339       TTI->getScalarizationOverhead(Ty, ~ShuffledIndices, /*Insert*/ true,
6340                                     /*Extract*/ false);
6341   if (NeedToShuffle)
6342     Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty);
6343   return Cost;
6344 }
6345 
6346 InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const {
6347   // Find the type of the operands in VL.
6348   Type *ScalarTy = VL[0]->getType();
6349   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
6350     ScalarTy = SI->getValueOperand()->getType();
6351   auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
6352   bool DuplicateNonConst = false;
6353   // Find the cost of inserting/extracting values from the vector.
6354   // Check if the same elements are inserted several times and count them as
6355   // shuffle candidates.
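  // E.g. for VL = {%x, %y, %x, 7} the lower duplicate of %x and the constant 7
  // are marked in ShuffledElements, so only the remaining two lanes are priced
  // as real insertelement instructions.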
6356   APInt ShuffledElements = APInt::getZero(VL.size());
6357   DenseSet<Value *> UniqueElements;
  // Iterate in reverse order so that, for duplicated values, the insert into
  // the highest (typically most expensive) lane is the one that gets priced;
  // lower-lane duplicates become shuffle candidates.
6359   for (unsigned I = VL.size(); I > 0; --I) {
6360     unsigned Idx = I - 1;
6361     // No need to shuffle duplicates for constants.
6362     if (isConstant(VL[Idx])) {
6363       ShuffledElements.setBit(Idx);
6364       continue;
6365     }
6366     if (!UniqueElements.insert(VL[Idx]).second) {
6367       DuplicateNonConst = true;
6368       ShuffledElements.setBit(Idx);
6369     }
6370   }
6371   return getGatherCost(VecTy, ShuffledElements, DuplicateNonConst);
6372 }
6373 
6374 // Perform operand reordering on the instructions in VL and return the reordered
6375 // operands in Left and Right.
6376 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
6377                                              SmallVectorImpl<Value *> &Left,
6378                                              SmallVectorImpl<Value *> &Right,
6379                                              const DataLayout &DL,
6380                                              ScalarEvolution &SE,
6381                                              const BoUpSLP &R) {
6382   if (VL.empty())
6383     return;
6384   VLOperands Ops(VL, DL, SE, R);
6385   // Reorder the operands in place.
6386   Ops.reorder();
6387   Left = Ops.getVL(0);
6388   Right = Ops.getVL(1);
6389 }
6390 
6391 void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) {
6392   // Get the basic block this bundle is in. All instructions in the bundle
6393   // should be in this block.
6394   auto *Front = E->getMainOp();
6395   auto *BB = Front->getParent();
6396   assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool {
6397     auto *I = cast<Instruction>(V);
6398     return !E->isOpcodeOrAlt(I) || I->getParent() == BB;
6399   }));
6400 
6401   // The last instruction in the bundle in program order.
6402   Instruction *LastInst = nullptr;
6403 
  // Find the last instruction. The common case should be that BB has been
  // scheduled, and the last instruction is E->Scalars.back(). So we start with
  // E->Scalars.back() and iterate over schedule data until we reach the end of
  // the bundle. The end of the bundle is marked by null ScheduleData.
6408   if (BlocksSchedules.count(BB)) {
6409     auto *Bundle =
6410         BlocksSchedules[BB]->getScheduleData(E->isOneOf(E->Scalars.back()));
6411     if (Bundle && Bundle->isPartOfBundle())
6412       for (; Bundle; Bundle = Bundle->NextInBundle)
6413         if (Bundle->OpValue == Bundle->Inst)
6414           LastInst = Bundle->Inst;
6415   }
6416 
  // LastInst can still be null at this point if there's either no entry
  // for BB in BlocksSchedules or no ScheduleData available for
  // E->Scalars.back(). This can be the case if buildTree_rec aborts for various
6420   // reasons (e.g., the maximum recursion depth is reached, the maximum region
6421   // size is reached, etc.). ScheduleData is initialized in the scheduling
6422   // "dry-run".
6423   //
6424   // If this happens, we can still find the last instruction by brute force. We
6425   // iterate forwards from Front (inclusive) until we either see all
6426   // instructions in the bundle or reach the end of the block. If Front is the
6427   // last instruction in program order, LastInst will be set to Front, and we
6428   // will visit all the remaining instructions in the block.
6429   //
6430   // One of the reasons we exit early from buildTree_rec is to place an upper
6431   // bound on compile-time. Thus, taking an additional compile-time hit here is
6432   // not ideal. However, this should be exceedingly rare since it requires that
6433   // we both exit early from buildTree_rec and that the bundle be out-of-order
6434   // (causing us to iterate all the way to the end of the block).
6435   if (!LastInst) {
6436     SmallPtrSet<Value *, 16> Bundle(E->Scalars.begin(), E->Scalars.end());
6437     for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
6438       if (Bundle.erase(&I) && E->isOpcodeOrAlt(&I))
6439         LastInst = &I;
6440       if (Bundle.empty())
6441         break;
6442     }
6443   }
6444   assert(LastInst && "Failed to find last instruction in bundle");
6445 
6446   // Set the insertion point after the last instruction in the bundle. Set the
6447   // debug location to Front.
6448   Builder.SetInsertPoint(BB, ++LastInst->getIterator());
6449   Builder.SetCurrentDebugLocation(Front->getDebugLoc());
6450 }
6451 
6452 Value *BoUpSLP::gather(ArrayRef<Value *> VL) {
  // List of instructions/lanes from the current block and/or from blocks that
  // are part of the current loop. These instructions are inserted last so that
  // the loop-invariant part of the gather sequence can later be hoisted out of
  // the loop body with better chances of success.
6457   SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts;
6458   SmallSet<int, 4> PostponedIndices;
6459   Loop *L = LI->getLoopFor(Builder.GetInsertBlock());
6460   auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) {
6461     SmallPtrSet<BasicBlock *, 4> Visited;
6462     while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second)
6463       InsertBB = InsertBB->getSinglePredecessor();
6464     return InsertBB && InsertBB == InstBB;
6465   };
6466   for (int I = 0, E = VL.size(); I < E; ++I) {
6467     if (auto *Inst = dyn_cast<Instruction>(VL[I]))
6468       if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) ||
6469            getTreeEntry(Inst) || (L && (L->contains(Inst)))) &&
6470           PostponedIndices.insert(I).second)
6471         PostponedInsts.emplace_back(Inst, I);
6472   }
6473 
6474   auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) {
6475     Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos));
6476     auto *InsElt = dyn_cast<InsertElementInst>(Vec);
6477     if (!InsElt)
6478       return Vec;
6479     GatherShuffleSeq.insert(InsElt);
6480     CSEBlocks.insert(InsElt->getParent());
6481     // Add to our 'need-to-extract' list.
6482     if (TreeEntry *Entry = getTreeEntry(V)) {
6483       // Find which lane we need to extract.
6484       unsigned FoundLane = Entry->findLaneForValue(V);
6485       ExternalUses.emplace_back(V, InsElt, FoundLane);
6486     }
6487     return Vec;
6488   };
6489   Value *Val0 =
6490       isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0];
6491   FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size());
6492   Value *Vec = PoisonValue::get(VecTy);
6493   SmallVector<int> NonConsts;
  // Insert the constant values first.
6495   for (int I = 0, E = VL.size(); I < E; ++I) {
6496     if (PostponedIndices.contains(I))
6497       continue;
6498     if (!isConstant(VL[I])) {
6499       NonConsts.push_back(I);
6500       continue;
6501     }
6502     Vec = CreateInsertElement(Vec, VL[I], I);
6503   }
6504   // Insert non-constant values.
6505   for (int I : NonConsts)
6506     Vec = CreateInsertElement(Vec, VL[I], I);
  // Append instructions that are (or may be) part of the loop at the end, so
  // the loop-independent part of the gather sequence remains hoistable.
6509   for (const std::pair<Value *, unsigned> &Pair : PostponedInsts)
6510     Vec = CreateInsertElement(Vec, Pair.first, Pair.second);
6511 
6512   return Vec;
6513 }
6514 
6515 namespace {
/// Merges shuffle masks and emits the final shuffle instruction, if required.
6517 class ShuffleInstructionBuilder {
6518   IRBuilderBase &Builder;
6519   const unsigned VF = 0;
6520   bool IsFinalized = false;
6521   SmallVector<int, 4> Mask;
6522   /// Holds all of the instructions that we gathered.
6523   SetVector<Instruction *> &GatherShuffleSeq;
6524   /// A list of blocks that we are going to CSE.
6525   SetVector<BasicBlock *> &CSEBlocks;
6526 
6527 public:
6528   ShuffleInstructionBuilder(IRBuilderBase &Builder, unsigned VF,
6529                             SetVector<Instruction *> &GatherShuffleSeq,
6530                             SetVector<BasicBlock *> &CSEBlocks)
6531       : Builder(Builder), VF(VF), GatherShuffleSeq(GatherShuffleSeq),
6532         CSEBlocks(CSEBlocks) {}
6533 
6534   /// Adds a mask, inverting it before applying.
6535   void addInversedMask(ArrayRef<unsigned> SubMask) {
6536     if (SubMask.empty())
6537       return;
6538     SmallVector<int, 4> NewMask;
6539     inversePermutation(SubMask, NewMask);
6540     addMask(NewMask);
6541   }
6542 
  /// Adds a mask, merging it with the already-accumulated one.
6544   void addMask(ArrayRef<unsigned> SubMask) {
6545     SmallVector<int, 4> NewMask(SubMask.begin(), SubMask.end());
6546     addMask(NewMask);
6547   }
6548 
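  /// Adds a mask, merging it into the accumulated one. The sub-mask is applied
  /// on top of (i.e. after) the masks added so far.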
6549   void addMask(ArrayRef<int> SubMask) { ::addMask(Mask, SubMask); }
6550 
6551   Value *finalize(Value *V) {
6552     IsFinalized = true;
6553     unsigned ValueVF = cast<FixedVectorType>(V->getType())->getNumElements();
6554     if (VF == ValueVF && Mask.empty())
6555       return V;
6556     SmallVector<int, 4> NormalizedMask(VF, UndefMaskElem);
6557     std::iota(NormalizedMask.begin(), NormalizedMask.end(), 0);
6558     addMask(NormalizedMask);
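    // Mask now has exactly VF elements indexing into V, so, unless it turned
    // out to be an identity, a single shufflevector realizes all of the
    // accumulated masks at once.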
6559 
6560     if (VF == ValueVF && ShuffleVectorInst::isIdentityMask(Mask))
6561       return V;
6562     Value *Vec = Builder.CreateShuffleVector(V, Mask, "shuffle");
6563     if (auto *I = dyn_cast<Instruction>(Vec)) {
6564       GatherShuffleSeq.insert(I);
6565       CSEBlocks.insert(I->getParent());
6566     }
6567     return Vec;
6568   }
6569 
6570   ~ShuffleInstructionBuilder() {
6571     assert((IsFinalized || Mask.empty()) &&
6572            "Shuffle construction must be finalized.");
6573   }
6574 };
6575 } // namespace
6576 
6577 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
6578   const unsigned VF = VL.size();
6579   InstructionsState S = getSameOpcode(VL);
6580   if (S.getOpcode()) {
6581     if (TreeEntry *E = getTreeEntry(S.OpValue))
6582       if (E->isSame(VL)) {
6583         Value *V = vectorizeTree(E);
6584         if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) {
6585           if (!E->ReuseShuffleIndices.empty()) {
6586             // Reshuffle to get only unique values.
6587             // If some of the scalars are duplicated in the vectorization tree
6588             // entry, we do not vectorize them but instead generate a mask for
6589             // the reuses. But if there are several users of the same entry,
6590             // they may have different vectorization factors. This is especially
6591             // important for PHI nodes. In this case, we need to adapt the
6592             // resulting instruction for the user vectorization factor and have
6593             // to reshuffle it again to take only unique elements of the vector.
            // Without this code the function would incorrectly return a
            // reduced vector instruction with duplicated elements instead of
            // the unique ones.
6596 
6597             // block:
6598             // %phi = phi <2 x > { .., %entry} {%shuffle, %block}
6599             // %2 = shuffle <2 x > %phi, poison, <4 x > <1, 1, 0, 0>
6600             // ... (use %2)
6601             // %shuffle = shuffle <2 x> %2, poison, <2 x> {2, 0}
6602             // br %block
6603             SmallVector<int> UniqueIdxs(VF, UndefMaskElem);
6604             SmallSet<int, 4> UsedIdxs;
6605             int Pos = 0;
6606             int Sz = VL.size();
6607             for (int Idx : E->ReuseShuffleIndices) {
6608               if (Idx != Sz && Idx != UndefMaskElem &&
6609                   UsedIdxs.insert(Idx).second)
6610                 UniqueIdxs[Idx] = Pos;
6611               ++Pos;
6612             }
            assert(VF >= UsedIdxs.size() &&
                   "Expected vectorization factor to cover all used indices.");
6615             UniqueIdxs.append(VF - UsedIdxs.size(), UndefMaskElem);
6616             V = Builder.CreateShuffleVector(V, UniqueIdxs, "shrink.shuffle");
6617           } else {
6618             assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() &&
6619                    "Expected vectorization factor less "
6620                    "than original vector size.");
6621             SmallVector<int> UniformMask(VF, 0);
6622             std::iota(UniformMask.begin(), UniformMask.end(), 0);
6623             V = Builder.CreateShuffleVector(V, UniformMask, "shrink.shuffle");
6624           }
6625           if (auto *I = dyn_cast<Instruction>(V)) {
6626             GatherShuffleSeq.insert(I);
6627             CSEBlocks.insert(I->getParent());
6628           }
6629         }
6630         return V;
6631       }
6632   }
6633 
6634   // Can't vectorize this, so simply build a new vector with each lane
6635   // corresponding to the requested value.
6636   return createBuildVector(VL);
6637 }

Value *BoUpSLP::createBuildVector(ArrayRef<Value *> VL) {
6639   unsigned VF = VL.size();
6640   // Exploit possible reuse of values across lanes.
  SmallVector<int> ReuseShuffleIndices;
6642   SmallVector<Value *> UniqueValues;
6643   if (VL.size() > 2) {
6644     DenseMap<Value *, unsigned> UniquePositions;
6645     unsigned NumValues =
6646         std::distance(VL.begin(), find_if(reverse(VL), [](Value *V) {
6647                                     return !isa<UndefValue>(V);
6648                                   }).base());
6649     VF = std::max<unsigned>(VF, PowerOf2Ceil(NumValues));
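    // E.g. 3 meaningful values followed by one undef in a 4-wide VL give
    // NumValues == 3 and keep VF at 4.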
6650     int UniqueVals = 0;
6651     for (Value *V : VL.drop_back(VL.size() - VF)) {
6652       if (isa<UndefValue>(V)) {
        ReuseShuffleIndices.emplace_back(UndefMaskElem);
6654         continue;
6655       }
6656       if (isConstant(V)) {
        ReuseShuffleIndices.emplace_back(UniqueValues.size());
6658         UniqueValues.emplace_back(V);
6659         continue;
6660       }
6661       auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
      ReuseShuffleIndices.emplace_back(Res.first->second);
6663       if (Res.second) {
6664         UniqueValues.emplace_back(V);
6665         ++UniqueVals;
6666       }
6667     }
6668     if (UniqueVals == 1 && UniqueValues.size() == 1) {
6669       // Emit pure splat vector.
      ReuseShuffleIndices.append(VF - ReuseShuffleIndices.size(),
                                 UndefMaskElem);
6672     } else if (UniqueValues.size() >= VF - 1 || UniqueValues.size() <= 1) {
      ReuseShuffleIndices.clear();
6674       UniqueValues.clear();
6675       UniqueValues.append(VL.begin(), std::next(VL.begin(), NumValues));
6676     }
6677     UniqueValues.append(VF - UniqueValues.size(),
6678                         PoisonValue::get(VL[0]->getType()));
6679     VL = UniqueValues;
6680   }
6681 
6682   ShuffleInstructionBuilder ShuffleBuilder(Builder, VF, GatherShuffleSeq,
6683                                            CSEBlocks);
6684   Value *Vec = gather(VL);
  if (!ReuseShuffleIndices.empty()) {
    ShuffleBuilder.addMask(ReuseShuffleIndices);
6687     Vec = ShuffleBuilder.finalize(Vec);
6688   }
6689   return Vec;
6690 }
6691 
6692 Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
6693   IRBuilder<>::InsertPointGuard Guard(Builder);
6694 
6695   if (E->VectorizedValue) {
6696     LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
6697     return E->VectorizedValue;
6698   }
6699 
6700   bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
6701   unsigned VF = E->getVectorFactor();
6702   ShuffleInstructionBuilder ShuffleBuilder(Builder, VF, GatherShuffleSeq,
6703                                            CSEBlocks);
6704   if (E->State == TreeEntry::NeedToGather) {
6705     if (E->getMainOp())
6706       setInsertPointAfterBundle(E);
6707     Value *Vec;
6708     SmallVector<int> Mask;
6709     SmallVector<const TreeEntry *> Entries;
6710     Optional<TargetTransformInfo::ShuffleKind> Shuffle =
6711         isGatherShuffledEntry(E, Mask, Entries);
6712     if (Shuffle.hasValue()) {
6713       assert((Entries.size() == 1 || Entries.size() == 2) &&
6714              "Expected shuffle of 1 or 2 entries.");
6715       Vec = Builder.CreateShuffleVector(Entries.front()->VectorizedValue,
6716                                         Entries.back()->VectorizedValue, Mask);
6717       if (auto *I = dyn_cast<Instruction>(Vec)) {
6718         GatherShuffleSeq.insert(I);
6719         CSEBlocks.insert(I->getParent());
6720       }
6721     } else {
6722       Vec = gather(E->Scalars);
6723     }
6724     if (NeedToShuffleReuses) {
6725       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
6726       Vec = ShuffleBuilder.finalize(Vec);
6727     }
6728     E->VectorizedValue = Vec;
6729     return Vec;
6730   }
6731 
6732   assert((E->State == TreeEntry::Vectorize ||
6733           E->State == TreeEntry::ScatterVectorize) &&
6734          "Unhandled state");
6735   unsigned ShuffleOrOp =
6736       E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
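  // Alternate-opcode bundles (e.g. mixed fadd/fsub) are dispatched to the
  // ShuffleVector case below, which emits both opcodes and blends the lanes.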
6737   Instruction *VL0 = E->getMainOp();
6738   Type *ScalarTy = VL0->getType();
6739   if (auto *Store = dyn_cast<StoreInst>(VL0))
6740     ScalarTy = Store->getValueOperand()->getType();
6741   else if (auto *IE = dyn_cast<InsertElementInst>(VL0))
6742     ScalarTy = IE->getOperand(1)->getType();
6743   auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size());
6744   switch (ShuffleOrOp) {
6745     case Instruction::PHI: {
6746       assert(
6747           (E->ReorderIndices.empty() || E != VectorizableTree.front().get()) &&
6748           "PHI reordering is free.");
6749       auto *PH = cast<PHINode>(VL0);
6750       Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
6751       Builder.SetCurrentDebugLocation(PH->getDebugLoc());
6752       PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
6753       Value *V = NewPhi;
6754 
      // Adjust the insertion point once all PHIs have been generated.
6756       Builder.SetInsertPoint(&*PH->getParent()->getFirstInsertionPt());
6757       Builder.SetCurrentDebugLocation(PH->getDebugLoc());
6758 
6759       ShuffleBuilder.addInversedMask(E->ReorderIndices);
6760       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
6761       V = ShuffleBuilder.finalize(V);
6762 
6763       E->VectorizedValue = V;
6764 
6765       // PHINodes may have multiple entries from the same block. We want to
6766       // visit every block once.
6767       SmallPtrSet<BasicBlock*, 4> VisitedBBs;
6768 
6769       for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
6770         ValueList Operands;
6771         BasicBlock *IBB = PH->getIncomingBlock(i);
6772 
6773         if (!VisitedBBs.insert(IBB).second) {
6774           NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
6775           continue;
6776         }
6777 
6778         Builder.SetInsertPoint(IBB->getTerminator());
6779         Builder.SetCurrentDebugLocation(PH->getDebugLoc());
6780         Value *Vec = vectorizeTree(E->getOperand(i));
6781         NewPhi->addIncoming(Vec, IBB);
6782       }
6783 
6784       assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
6785              "Invalid number of incoming values");
6786       return V;
6787     }
6788 
6789     case Instruction::ExtractElement: {
6790       Value *V = E->getSingleOperand(0);
6791       Builder.SetInsertPoint(VL0);
6792       ShuffleBuilder.addInversedMask(E->ReorderIndices);
6793       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
6794       V = ShuffleBuilder.finalize(V);
6795       E->VectorizedValue = V;
6796       return V;
6797     }
6798     case Instruction::ExtractValue: {
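      // All scalars are extractvalues from the same aggregate load, so the
      // data is reloaded as a single vector through a pointer bitcast to a
      // vector type.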
6799       auto *LI = cast<LoadInst>(E->getSingleOperand(0));
6800       Builder.SetInsertPoint(LI);
6801       auto *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
6802       Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
6803       LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign());
6804       if (MSSA) {
6805         MemorySSAUpdater MSSAU(MSSA);
6806         auto *Access = MSSA->getMemoryAccess(LI);
6807         assert(Access);
6808         MemoryUseOrDef *NewAccess =
6809           MSSAU.createMemoryAccessBefore(V, Access->getDefiningAccess(),
6810                                          Access);
6811         MSSAU.insertUse(cast<MemoryUse>(NewAccess), true);
6812       }
6813       Value *NewV = propagateMetadata(V, E->Scalars);
6814       ShuffleBuilder.addInversedMask(E->ReorderIndices);
6815       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
6816       NewV = ShuffleBuilder.finalize(NewV);
6817       E->VectorizedValue = NewV;
6818       return NewV;
6819     }
6820     case Instruction::InsertElement: {
6821       assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique");
6822       Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back()));
6823       Value *V = vectorizeTree(E->getOperand(1));
6824 
      // Find the base of the insertelement chain: the first insert whose
      // destination vector is not produced by another insert in this bundle.
6826       auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) {
6827         return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0));
6828       }));
6829       const unsigned NumElts =
6830           cast<FixedVectorType>(FirstInsert->getType())->getNumElements();
6831       const unsigned NumScalars = E->Scalars.size();
6832 
6833       unsigned Offset = *getInsertIndex(VL0);
6834       assert(Offset < NumElts && "Failed to find vector index offset");
6835 
6836       // Create shuffle to resize vector
6837       SmallVector<int> Mask;
6838       if (!E->ReorderIndices.empty()) {
6839         inversePermutation(E->ReorderIndices, Mask);
6840         Mask.append(NumElts - NumScalars, UndefMaskElem);
6841       } else {
6842         Mask.assign(NumElts, UndefMaskElem);
6843         std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0);
6844       }
      // Build the final insertion mask and check whether it is an identity.
6846       bool IsIdentity = true;
6847       SmallVector<int> PrevMask(NumElts, UndefMaskElem);
6848       Mask.swap(PrevMask);
6849       for (unsigned I = 0; I < NumScalars; ++I) {
6850         Value *Scalar = E->Scalars[PrevMask[I]];
6851         unsigned InsertIdx = *getInsertIndex(Scalar);
6852         IsIdentity &= InsertIdx - Offset == I;
6853         Mask[InsertIdx - Offset] = I;
6854       }
6855       if (!IsIdentity || NumElts != NumScalars) {
6856         V = Builder.CreateShuffleVector(V, Mask);
6857         if (auto *I = dyn_cast<Instruction>(V)) {
6858           GatherShuffleSeq.insert(I);
6859           CSEBlocks.insert(I->getParent());
6860         }
6861       }
6862 
6863       if ((!IsIdentity || Offset != 0 ||
6864            !isUndefVector(FirstInsert->getOperand(0))) &&
6865           NumElts != NumScalars) {
6866         SmallVector<int> InsertMask(NumElts);
6867         std::iota(InsertMask.begin(), InsertMask.end(), 0);
6868         for (unsigned I = 0; I < NumElts; I++) {
6869           if (Mask[I] != UndefMaskElem)
6870             InsertMask[Offset + I] = NumElts + I;
6871         }
6872 
6873         V = Builder.CreateShuffleVector(
6874             FirstInsert->getOperand(0), V, InsertMask,
6875             cast<Instruction>(E->Scalars.back())->getName());
6876         if (auto *I = dyn_cast<Instruction>(V)) {
6877           GatherShuffleSeq.insert(I);
6878           CSEBlocks.insert(I->getParent());
6879         }
6880       }
6881 
6882       ++NumVectorInstructions;
6883       E->VectorizedValue = V;
6884       return V;
6885     }
6886     case Instruction::ZExt:
6887     case Instruction::SExt:
6888     case Instruction::FPToUI:
6889     case Instruction::FPToSI:
6890     case Instruction::FPExt:
6891     case Instruction::PtrToInt:
6892     case Instruction::IntToPtr:
6893     case Instruction::SIToFP:
6894     case Instruction::UIToFP:
6895     case Instruction::Trunc:
6896     case Instruction::FPTrunc:
6897     case Instruction::BitCast: {
6898       setInsertPointAfterBundle(E);
6899 
6900       Value *InVec = vectorizeTree(E->getOperand(0));
6901 
6902       if (E->VectorizedValue) {
6903         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
6904         return E->VectorizedValue;
6905       }
6906 
6907       auto *CI = cast<CastInst>(VL0);
6908       Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
6909       ShuffleBuilder.addInversedMask(E->ReorderIndices);
6910       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
6911       V = ShuffleBuilder.finalize(V);
6912 
6913       E->VectorizedValue = V;
6914       ++NumVectorInstructions;
6915       return V;
6916     }
6917     case Instruction::FCmp:
6918     case Instruction::ICmp: {
6919       setInsertPointAfterBundle(E);
6920 
6921       Value *L = vectorizeTree(E->getOperand(0));
6922       Value *R = vectorizeTree(E->getOperand(1));
6923 
6924       if (E->VectorizedValue) {
6925         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
6926         return E->VectorizedValue;
6927       }
6928 
6929       CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
6930       Value *V = Builder.CreateCmp(P0, L, R);
6931       propagateIRFlags(V, E->Scalars, VL0);
6932       ShuffleBuilder.addInversedMask(E->ReorderIndices);
6933       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
6934       V = ShuffleBuilder.finalize(V);
6935 
6936       E->VectorizedValue = V;
6937       ++NumVectorInstructions;
6938       return V;
6939     }
6940     case Instruction::Select: {
6941       setInsertPointAfterBundle(E);
6942 
6943       Value *Cond = vectorizeTree(E->getOperand(0));
6944       Value *True = vectorizeTree(E->getOperand(1));
6945       Value *False = vectorizeTree(E->getOperand(2));
6946 
6947       if (E->VectorizedValue) {
6948         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
6949         return E->VectorizedValue;
6950       }
6951 
6952       Value *V = Builder.CreateSelect(Cond, True, False);
6953       ShuffleBuilder.addInversedMask(E->ReorderIndices);
6954       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
6955       V = ShuffleBuilder.finalize(V);
6956 
6957       E->VectorizedValue = V;
6958       ++NumVectorInstructions;
6959       return V;
6960     }
6961     case Instruction::FNeg: {
6962       setInsertPointAfterBundle(E);
6963 
6964       Value *Op = vectorizeTree(E->getOperand(0));
6965 
6966       if (E->VectorizedValue) {
6967         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
6968         return E->VectorizedValue;
6969       }
6970 
6971       Value *V = Builder.CreateUnOp(
6972           static_cast<Instruction::UnaryOps>(E->getOpcode()), Op);
6973       propagateIRFlags(V, E->Scalars, VL0);
6974       if (auto *I = dyn_cast<Instruction>(V))
6975         V = propagateMetadata(I, E->Scalars);
6976 
6977       ShuffleBuilder.addInversedMask(E->ReorderIndices);
6978       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
6979       V = ShuffleBuilder.finalize(V);
6980 
6981       E->VectorizedValue = V;
6982       ++NumVectorInstructions;
6983 
6984       return V;
6985     }
6986     case Instruction::Add:
6987     case Instruction::FAdd:
6988     case Instruction::Sub:
6989     case Instruction::FSub:
6990     case Instruction::Mul:
6991     case Instruction::FMul:
6992     case Instruction::UDiv:
6993     case Instruction::SDiv:
6994     case Instruction::FDiv:
6995     case Instruction::URem:
6996     case Instruction::SRem:
6997     case Instruction::FRem:
6998     case Instruction::Shl:
6999     case Instruction::LShr:
7000     case Instruction::AShr:
7001     case Instruction::And:
7002     case Instruction::Or:
7003     case Instruction::Xor: {
7004       setInsertPointAfterBundle(E);
7005 
7006       Value *LHS = vectorizeTree(E->getOperand(0));
7007       Value *RHS = vectorizeTree(E->getOperand(1));
7008 
7009       if (E->VectorizedValue) {
7010         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
7011         return E->VectorizedValue;
7012       }
7013 
7014       Value *V = Builder.CreateBinOp(
7015           static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS,
7016           RHS);
7017       propagateIRFlags(V, E->Scalars, VL0);
7018       if (auto *I = dyn_cast<Instruction>(V))
7019         V = propagateMetadata(I, E->Scalars);
7020 
7021       ShuffleBuilder.addInversedMask(E->ReorderIndices);
7022       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
7023       V = ShuffleBuilder.finalize(V);
7024 
7025       E->VectorizedValue = V;
7026       ++NumVectorInstructions;
7027 
7028       return V;
7029     }
7030     case Instruction::Load: {
7031       // Loads are inserted at the head of the tree because we don't want to
7032       // sink them all the way down past store instructions.
7033       setInsertPointAfterBundle(E);
7034 
7035       LoadInst *LI = cast<LoadInst>(VL0);
7036       Instruction *NewLI;
7037       unsigned AS = LI->getPointerAddressSpace();
7038       Value *PO = LI->getPointerOperand();
7039       if (E->State == TreeEntry::Vectorize) {
7040         Value *VecPtr = Builder.CreateBitCast(PO, VecTy->getPointerTo(AS));
7041         NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign());
7042 
        // The pointer operand uses an in-tree scalar, so we add the new
        // BitCast or LoadInst to the ExternalUses list to make sure that an
        // extract will be generated in the future.
7046         if (TreeEntry *Entry = getTreeEntry(PO)) {
7047           // Find which lane we need to extract.
7048           unsigned FoundLane = Entry->findLaneForValue(PO);
7049           ExternalUses.emplace_back(
7050               PO, PO != VecPtr ? cast<User>(VecPtr) : NewLI, FoundLane);
7051         }
7052       } else {
7053         assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state");
7054         Value *VecPtr = vectorizeTree(E->getOperand(0));
7055         // Use the minimum alignment of the gathered loads.
7056         Align CommonAlignment = LI->getAlign();
7057         for (Value *V : E->Scalars)
7058           CommonAlignment =
7059               commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign());
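        // CreateMaskedGather without an explicit mask uses an all-true mask,
        // so every lane of the vectorized pointer operand is loaded.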
7060         NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment);
7061       }
7062 
7063       if (MSSA) {
7064         MemorySSAUpdater MSSAU(MSSA);
7065         auto *Access = MSSA->getMemoryAccess(LI);
7066         assert(Access);
7067         MemoryUseOrDef *NewAccess =
7068           MSSAU.createMemoryAccessAfter(NewLI, Access->getDefiningAccess(),
7069                                         Access);
7070         MSSAU.insertUse(cast<MemoryUse>(NewAccess), true);
7071       }
7072 
7073       Value *V = propagateMetadata(NewLI, E->Scalars);
7074 
7075       ShuffleBuilder.addInversedMask(E->ReorderIndices);
7076       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
7077       V = ShuffleBuilder.finalize(V);
7078       E->VectorizedValue = V;
7079       ++NumVectorInstructions;
7080       return V;
7081     }
7082     case Instruction::Store: {
7083       auto *SI = cast<StoreInst>(VL0);
7084       unsigned AS = SI->getPointerAddressSpace();
7085 
7086       setInsertPointAfterBundle(E);
7087 
7088       Value *VecValue = vectorizeTree(E->getOperand(0));
7089       ShuffleBuilder.addMask(E->ReorderIndices);
7090       VecValue = ShuffleBuilder.finalize(VecValue);
7091 
7092       Value *ScalarPtr = SI->getPointerOperand();
7093       Value *VecPtr = Builder.CreateBitCast(
7094           ScalarPtr, VecValue->getType()->getPointerTo(AS));
7095       StoreInst *ST =
7096           Builder.CreateAlignedStore(VecValue, VecPtr, SI->getAlign());
7097 
7098       if (MSSA) {
7099         MemorySSAUpdater MSSAU(MSSA);
7100         auto *Access = MSSA->getMemoryAccess(SI);
7101         assert(Access);
7102         MemoryUseOrDef *NewAccess =
7103           MSSAU.createMemoryAccessAfter(ST, Access->getDefiningAccess(),
7104                                         Access);
7105         MSSAU.insertDef(cast<MemoryDef>(NewAccess), true);
7106       }
7107 
7108       // The pointer operand uses an in-tree scalar, so add the new BitCast or
7109       // StoreInst to ExternalUses to make sure that an extract will be
7110       // generated in the future.
7111       if (TreeEntry *Entry = getTreeEntry(ScalarPtr)) {
7112         // Find which lane we need to extract.
7113         unsigned FoundLane = Entry->findLaneForValue(ScalarPtr);
7114         ExternalUses.push_back(ExternalUser(
7115             ScalarPtr, ScalarPtr != VecPtr ? cast<User>(VecPtr) : ST,
7116             FoundLane));
7117       }
7118 
7119       Value *V = propagateMetadata(ST, E->Scalars);
7120 
7121       E->VectorizedValue = V;
7122       ++NumVectorInstructions;
7123       return V;
7124     }
7125     case Instruction::GetElementPtr: {
7126       auto *GEP0 = cast<GetElementPtrInst>(VL0);
7127       setInsertPointAfterBundle(E);
7128 
7129       Value *Op0 = vectorizeTree(E->getOperand(0));
7130 
7131       SmallVector<Value *> OpVecs;
7132       for (int J = 1, N = GEP0->getNumOperands(); J < N; ++J) {
7133         Value *OpVec = vectorizeTree(E->getOperand(J));
7134         OpVecs.push_back(OpVec);
7135       }
7136 
7137       Value *V = Builder.CreateGEP(GEP0->getSourceElementType(), Op0, OpVecs);
7138       if (Instruction *I = dyn_cast<Instruction>(V))
7139         V = propagateMetadata(I, E->Scalars);
7140 
7141       ShuffleBuilder.addInversedMask(E->ReorderIndices);
7142       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
7143       V = ShuffleBuilder.finalize(V);
7144 
7145       E->VectorizedValue = V;
7146       ++NumVectorInstructions;
7147 
7148       return V;
7149     }
7150     case Instruction::Call: {
7151       CallInst *CI = cast<CallInst>(VL0);
7152       setInsertPointAfterBundle(E);
7153 
      Intrinsic::ID IID = Intrinsic::not_intrinsic;
7155       if (Function *FI = CI->getCalledFunction())
7156         IID = FI->getIntrinsicID();
7157 
7158       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
7159 
7160       auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
7161       bool UseIntrinsic = ID != Intrinsic::not_intrinsic &&
7162                           VecCallCosts.first <= VecCallCosts.second;
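      // The intrinsic form is chosen only when the call maps to a vector
      // intrinsic and its cost (VecCallCosts.first) does not exceed the vector
      // library-call cost (VecCallCosts.second).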
7163 
7164       Value *ScalarArg = nullptr;
7165       std::vector<Value *> OpVecs;
7166       SmallVector<Type *, 2> TysForDecl =
7167           {FixedVectorType::get(CI->getType(), E->Scalars.size())};
7168       for (int j = 0, e = CI->arg_size(); j < e; ++j) {
7169         ValueList OpVL;
        // Some intrinsics have scalar arguments; such an argument must not be
        // vectorized.
7172         if (UseIntrinsic && hasVectorInstrinsicScalarOpd(IID, j)) {
7173           CallInst *CEI = cast<CallInst>(VL0);
7174           ScalarArg = CEI->getArgOperand(j);
7175           OpVecs.push_back(CEI->getArgOperand(j));
7176           if (hasVectorInstrinsicOverloadedScalarOpd(IID, j))
7177             TysForDecl.push_back(ScalarArg->getType());
7178           continue;
7179         }
7180 
7181         Value *OpVec = vectorizeTree(E->getOperand(j));
7182         LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
7183         OpVecs.push_back(OpVec);
7184       }
7185 
7186       Function *CF;
7187       if (!UseIntrinsic) {
7188         VFShape Shape =
7189             VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
7190                                   VecTy->getNumElements())),
7191                          false /*HasGlobalPred*/);
7192         CF = VFDatabase(*CI).getVectorizedFunction(Shape);
7193       } else {
7194         CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl);
7195       }
7196 
7197       SmallVector<OperandBundleDef, 1> OpBundles;
7198       CI->getOperandBundlesAsDefs(OpBundles);
7199       Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);
7200 
      // The scalar argument uses an in-tree scalar, so we add the new
      // vectorized call to the ExternalUses list to make sure that an extract
      // will be generated in the future.
7204       if (ScalarArg) {
7205         if (TreeEntry *Entry = getTreeEntry(ScalarArg)) {
7206           // Find which lane we need to extract.
7207           unsigned FoundLane = Entry->findLaneForValue(ScalarArg);
7208           ExternalUses.push_back(
7209               ExternalUser(ScalarArg, cast<User>(V), FoundLane));
7210         }
7211       }
7212 
7213       propagateIRFlags(V, E->Scalars, VL0);
7214       ShuffleBuilder.addInversedMask(E->ReorderIndices);
7215       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
7216       V = ShuffleBuilder.finalize(V);
7217 
7218       E->VectorizedValue = V;
7219       ++NumVectorInstructions;
7220       return V;
7221     }
7222     case Instruction::ShuffleVector: {
7223       assert(E->isAltShuffle() &&
7224              ((Instruction::isBinaryOp(E->getOpcode()) &&
7225                Instruction::isBinaryOp(E->getAltOpcode())) ||
7226               (Instruction::isCast(E->getOpcode()) &&
7227                Instruction::isCast(E->getAltOpcode())) ||
7228               (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) &&
7229              "Invalid Shuffle Vector Operand");
7230 
      Value *LHS = nullptr, *RHS = nullptr;
      setInsertPointAfterBundle(E);
      LHS = vectorizeTree(E->getOperand(0));
      if (Instruction::isBinaryOp(E->getOpcode()) || isa<CmpInst>(VL0))
        RHS = vectorizeTree(E->getOperand(1));
7240 
7241       if (E->VectorizedValue) {
7242         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
7243         return E->VectorizedValue;
7244       }
7245 
7246       Value *V0, *V1;
7247       if (Instruction::isBinaryOp(E->getOpcode())) {
7248         V0 = Builder.CreateBinOp(
7249             static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
7250         V1 = Builder.CreateBinOp(
7251             static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS);
7252       } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) {
7253         V0 = Builder.CreateCmp(CI0->getPredicate(), LHS, RHS);
7254         auto *AltCI = cast<CmpInst>(E->getAltOp());
7255         CmpInst::Predicate AltPred = AltCI->getPredicate();
7256         V1 = Builder.CreateCmp(AltPred, LHS, RHS);
7257       } else {
7258         V0 = Builder.CreateCast(
7259             static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy);
7260         V1 = Builder.CreateCast(
7261             static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy);
7262       }
      // Record V0 and V1 for the later CSE analysis, which tries to find and
      // remove matching instructions, if any.
7265       for (Value *V : {V0, V1}) {
7266         if (auto *I = dyn_cast<Instruction>(V)) {
7267           GatherShuffleSeq.insert(I);
7268           CSEBlocks.insert(I->getParent());
7269         }
7270       }
7271 
7272       // Create shuffle to take alternate operations from the vector.
7273       // Also, gather up main and alt scalar ops to propagate IR flags to
7274       // each vector operation.
7275       ValueList OpScalars, AltScalars;
7276       SmallVector<int> Mask;
7277       buildShuffleEntryMask(
7278           E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices,
7279           [E](Instruction *I) {
7280             assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
7281             return isAlternateInstruction(I, E->getMainOp(), E->getAltOp());
7282           },
7283           Mask, &OpScalars, &AltScalars);
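      // E.g. for a <fadd, fsub, fadd, fsub> bundle (with no reordering or
      // reuse) the mask is {0, VF + 1, 2, VF + 3}: even lanes come from the
      // main-opcode vector V0, odd lanes from the alternate-opcode vector V1.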
7284 
7285       propagateIRFlags(V0, OpScalars);
7286       propagateIRFlags(V1, AltScalars);
7287 
7288       Value *V = Builder.CreateShuffleVector(V0, V1, Mask);
7289       if (auto *I = dyn_cast<Instruction>(V)) {
7290         V = propagateMetadata(I, E->Scalars);
7291         GatherShuffleSeq.insert(I);
7292         CSEBlocks.insert(I->getParent());
7293       }
7294       V = ShuffleBuilder.finalize(V);
7295 
7296       E->VectorizedValue = V;
7297       ++NumVectorInstructions;
7298 
7299       return V;
7300     }
    default:
      llvm_unreachable("unknown inst");
7303   }
7304   return nullptr;
7305 }
7306 
7307 Value *BoUpSLP::vectorizeTree() {
7308   ExtraValueToDebugLocsMap ExternallyUsedValues;
7309   return vectorizeTree(ExternallyUsedValues);
7310 }
7311 
7312 Value *
7313 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
7314   // All blocks must be scheduled before any instructions are inserted.
7315   for (auto &BSIter : BlocksSchedules) {
7316     scheduleBlock(BSIter.second.get());
7317   }
7318 
7319   Builder.SetInsertPoint(&F->getEntryBlock().front());
7320   auto *VectorRoot = vectorizeTree(VectorizableTree[0].get());
7321 
  // If the vectorized tree can be rewritten in a smaller type, we truncate the
  // vectorized root. InstCombine will then rewrite the entire expression. We
  // sign- or zero-extend the extracted values below.
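  // E.g. if demanded-bits analysis proved that 8 bits suffice, a <4 x i32>
  // root is truncated to <4 x i8> here, and each lane extracted for external
  // users is sign- or zero-extended back to i32 in ExtractAndExtendIfNeeded.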
7325   auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
7326   if (MinBWs.count(ScalarRoot)) {
7327     if (auto *I = dyn_cast<Instruction>(VectorRoot)) {
      // If the current instruction is a phi, set the insertion point after all
      // the phi nodes in its block; otherwise insert right after it.
7330       if (isa<PHINode>(I))
7331         Builder.SetInsertPoint(&*I->getParent()->getFirstInsertionPt());
7332       else
7333         Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
7334     }
7335     auto BundleWidth = VectorizableTree[0]->Scalars.size();
7336     auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
7337     auto *VecTy = FixedVectorType::get(MinTy, BundleWidth);
7338     auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
7339     VectorizableTree[0]->VectorizedValue = Trunc;
7340   }
7341 
7342   LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
7343                     << " values .\n");
7344 
7345   // Extract all of the elements with the external uses.
7346   for (const auto &ExternalUse : ExternalUses) {
7347     Value *Scalar = ExternalUse.Scalar;
7348     llvm::User *User = ExternalUse.User;
7349 
    // Skip users that we have already replaced (RAUW). This happens when one
    // instruction has multiple uses of the same value.
7352     if (User && !is_contained(Scalar->users(), User))
7353       continue;
7354     TreeEntry *E = getTreeEntry(Scalar);
7355     assert(E && "Invalid scalar");
7356     assert(E->State != TreeEntry::NeedToGather &&
7357            "Extracting from a gather list");
7358 
7359     Value *Vec = E->VectorizedValue;
7360     assert(Vec && "Can't find vectorizable value");
7361 
7362     Value *Lane = Builder.getInt32(ExternalUse.Lane);
7363     auto ExtractAndExtendIfNeeded = [&](Value *Vec) {
7364       if (Scalar->getType() != Vec->getType()) {
7365         Value *Ex;
7366         // "Reuse" the existing extract to improve final codegen.
7367         if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) {
7368           Ex = Builder.CreateExtractElement(ES->getOperand(0),
7369                                             ES->getOperand(1));
7370         } else {
7371           Ex = Builder.CreateExtractElement(Vec, Lane);
7372         }
7373         // If necessary, sign-extend or zero-extend ScalarRoot
7374         // to the larger type.
7375         if (!MinBWs.count(ScalarRoot))
7376           return Ex;
7377         if (MinBWs[ScalarRoot].second)
7378           return Builder.CreateSExt(Ex, Scalar->getType());
7379         return Builder.CreateZExt(Ex, Scalar->getType());
7380       }
7381       assert(isa<FixedVectorType>(Scalar->getType()) &&
7382              isa<InsertElementInst>(Scalar) &&
7383              "In-tree scalar of vector type is not insertelement?");
7384       return Vec;
7385     };
    // If User == nullptr, the Scalar is used as an extra arg. Generate an
7387     // ExtractElement instruction and update the record for this scalar in
7388     // ExternallyUsedValues.
7389     if (!User) {
7390       assert(ExternallyUsedValues.count(Scalar) &&
7391              "Scalar with nullptr as an external user must be registered in "
7392              "ExternallyUsedValues map");
7393       if (auto *VecI = dyn_cast<Instruction>(Vec)) {
7394         Builder.SetInsertPoint(VecI->getParent(),
7395                                std::next(VecI->getIterator()));
7396       } else {
7397         Builder.SetInsertPoint(&F->getEntryBlock().front());
7398       }
7399       Value *NewInst = ExtractAndExtendIfNeeded(Vec);
7400       CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
7401       auto &NewInstLocs = ExternallyUsedValues[NewInst];
7402       auto It = ExternallyUsedValues.find(Scalar);
7403       assert(It != ExternallyUsedValues.end() &&
7404              "Externally used scalar is not found in ExternallyUsedValues");
7405       NewInstLocs.append(It->second);
7406       ExternallyUsedValues.erase(Scalar);
7407       // Required to update internally referenced instructions.
7408       Scalar->replaceAllUsesWith(NewInst);
7409       continue;
7410     }
7411 
7412     // Generate extracts for out-of-tree users.
7413     // Find the insertion point for the extractelement lane.
7414     if (auto *VecI = dyn_cast<Instruction>(Vec)) {
7415       if (PHINode *PH = dyn_cast<PHINode>(User)) {
7416         for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
7417           if (PH->getIncomingValue(i) == Scalar) {
7418             Instruction *IncomingTerminator =
7419                 PH->getIncomingBlock(i)->getTerminator();
7420             if (isa<CatchSwitchInst>(IncomingTerminator)) {
7421               Builder.SetInsertPoint(VecI->getParent(),
7422                                      std::next(VecI->getIterator()));
7423             } else {
7424               Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
7425             }
7426             Value *NewInst = ExtractAndExtendIfNeeded(Vec);
7427             CSEBlocks.insert(PH->getIncomingBlock(i));
7428             PH->setOperand(i, NewInst);
7429           }
7430         }
7431       } else {
7432         Builder.SetInsertPoint(cast<Instruction>(User));
7433         Value *NewInst = ExtractAndExtendIfNeeded(Vec);
7434         CSEBlocks.insert(cast<Instruction>(User)->getParent());
7435         User->replaceUsesOfWith(Scalar, NewInst);
7436       }
7437     } else {
7438       Builder.SetInsertPoint(&F->getEntryBlock().front());
7439       Value *NewInst = ExtractAndExtendIfNeeded(Vec);
7440       CSEBlocks.insert(&F->getEntryBlock());
7441       User->replaceUsesOfWith(Scalar, NewInst);
7442     }
7443 
7444     LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
7445   }
7446 
7447   // For each vectorized value:
7448   for (auto &TEPtr : VectorizableTree) {
7449     TreeEntry *Entry = TEPtr.get();
7450 
7451     // No need to handle users of gathered values.
7452     if (Entry->State == TreeEntry::NeedToGather)
7453       continue;
7454 
7455     assert(Entry->VectorizedValue && "Can't find vectorizable value");
7456 
7457     // For each lane:
7458     for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
7459       Value *Scalar = Entry->Scalars[Lane];
7460 
7461 #ifndef NDEBUG
7462       Type *Ty = Scalar->getType();
7463       if (!Ty->isVoidTy()) {
7464         for (User *U : Scalar->users()) {
7465           LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
7466 
7467           // It is legal to delete users in the ignorelist.
7468           assert((getTreeEntry(U) || is_contained(UserIgnoreList, U) ||
7469                   (isa_and_nonnull<Instruction>(U) &&
7470                    isDeleted(cast<Instruction>(U)))) &&
7471                  "Deleting out-of-tree value");
7472         }
7473       }
7474 #endif
7475       LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
7476       eraseInstruction(cast<Instruction>(Scalar));
7477     }
7478   }
7479 
7480   Builder.ClearInsertionPoint();
7481   InstrElementSize.clear();
7482 
7483   return VectorizableTree[0]->VectorizedValue;
7484 }
7485 
7486 void BoUpSLP::optimizeGatherSequence() {
7487   LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherShuffleSeq.size()
7488                     << " gather sequences instructions.\n");
7489   // LICM InsertElementInst sequences.
7490   for (Instruction *I : GatherShuffleSeq) {
7491     if (isDeleted(I))
7492       continue;
7493 
7494     // Check if this block is inside a loop.
7495     Loop *L = LI->getLoopFor(I->getParent());
7496     if (!L)
7497       continue;
7498 
7499     // Check if it has a preheader.
7500     BasicBlock *PreHeader = L->getLoopPreheader();
7501     if (!PreHeader)
7502       continue;
7503 
    // If any operand of this instruction is itself an instruction defined
    // inside the loop, then we can't hoist the instruction out of it.
7507     if (any_of(I->operands(), [L](Value *V) {
7508           auto *OpI = dyn_cast<Instruction>(V);
7509           return OpI && L->contains(OpI);
7510         }))
7511       continue;
7512 
7513     // We can hoist this instruction. Move it to the pre-header.
7514     I->moveBefore(PreHeader->getTerminator());
7515   }
7516 
7517   // Make a list of all reachable blocks in our CSE queue.
7518   SmallVector<const DomTreeNode *, 8> CSEWorkList;
7519   CSEWorkList.reserve(CSEBlocks.size());
7520   for (BasicBlock *BB : CSEBlocks)
7521     if (DomTreeNode *N = DT->getNode(BB)) {
7522       assert(DT->isReachableFromEntry(N));
7523       CSEWorkList.push_back(N);
7524     }
7525 
7526   // Sort blocks by domination. This ensures we visit a block after all blocks
7527   // dominating it are visited.
7528   llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) {
7529     assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) &&
7530            "Different nodes should have different DFS numbers");
7531     return A->getDFSNumIn() < B->getDFSNumIn();
7532   });
7533 
  // Less-defined shuffles can be replaced by more-defined copies.
  // Of two shuffles with the same vector operands, one is less defined if each
  // of its mask indices either matches the other mask or is undef. E.g.
  // shuffle %0, poison, <0, 0, 0, undef> is less defined than shuffle %0,
  // poison, <0, 0, 0, 0>.
7539   auto &&IsIdenticalOrLessDefined = [this](Instruction *I1, Instruction *I2,
7540                                            SmallVectorImpl<int> &NewMask) {
7541     if (I1->getType() != I2->getType())
7542       return false;
7543     auto *SI1 = dyn_cast<ShuffleVectorInst>(I1);
7544     auto *SI2 = dyn_cast<ShuffleVectorInst>(I2);
7545     if (!SI1 || !SI2)
7546       return I1->isIdenticalTo(I2);
7547     if (SI1->isIdenticalTo(SI2))
7548       return true;
7549     for (int I = 0, E = SI1->getNumOperands(); I < E; ++I)
7550       if (SI1->getOperand(I) != SI2->getOperand(I))
7551         return false;
7552     // Check if the second instruction is more defined than the first one.
7553     NewMask.assign(SI2->getShuffleMask().begin(), SI2->getShuffleMask().end());
7554     ArrayRef<int> SM1 = SI1->getShuffleMask();
7555     // Count trailing undefs in the mask to check the final number of used
7556     // registers.
7557     unsigned LastUndefsCnt = 0;
7558     for (int I = 0, E = NewMask.size(); I < E; ++I) {
7559       if (SM1[I] == UndefMaskElem)
7560         ++LastUndefsCnt;
7561       else
7562         LastUndefsCnt = 0;
7563       if (NewMask[I] != UndefMaskElem && SM1[I] != UndefMaskElem &&
7564           NewMask[I] != SM1[I])
7565         return false;
7566       if (NewMask[I] == UndefMaskElem)
7567         NewMask[I] = SM1[I];
7568     }
7569     // Check if the last undefs actually change the final number of used vector
7570     // registers.
7571     return SM1.size() - LastUndefsCnt > 1 &&
7572            TTI->getNumberOfParts(SI1->getType()) ==
7573                TTI->getNumberOfParts(
7574                    FixedVectorType::get(SI1->getType()->getElementType(),
7575                                         SM1.size() - LastUndefsCnt));
7576   };
7577   // Perform O(N^2) search over the gather/shuffle sequences and merge identical
7578   // instructions. TODO: We can further optimize this scan if we split the
7579   // instructions into different buckets based on the insert lane.
7580   SmallVector<Instruction *, 16> Visited;
7581   for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
7582     assert(*I &&
7583            (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
7584            "Worklist not sorted properly!");
7585     BasicBlock *BB = (*I)->getBlock();
7586     // For all instructions in blocks containing gather sequences:
7587     for (Instruction &In : llvm::make_early_inc_range(*BB)) {
7588       if (isDeleted(&In))
7589         continue;
7590       if (!isa<InsertElementInst>(&In) && !isa<ExtractElementInst>(&In) &&
7591           !isa<ShuffleVectorInst>(&In) && !GatherShuffleSeq.contains(&In))
7592         continue;
7593 
7594       // Check if we can replace this instruction with any of the
7595       // visited instructions.
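      // Two replacement directions are tried below: either the current
      // instruction is identical to (or less defined than) a dominating
      // visited instruction and is replaced by it, or the current one is more
      // defined than a visited gather shuffle and takes over its uses. In
      // both cases the shuffle masks are merged via NewMask.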
7596       bool Replaced = false;
7597       for (Instruction *&V : Visited) {
7598         SmallVector<int> NewMask;
7599         if (IsIdenticalOrLessDefined(&In, V, NewMask) &&
7600             DT->dominates(V->getParent(), In.getParent())) {
7601           In.replaceAllUsesWith(V);
7602           eraseInstruction(&In);
7603           if (auto *SI = dyn_cast<ShuffleVectorInst>(V))
7604             if (!NewMask.empty())
7605               SI->setShuffleMask(NewMask);
7606           Replaced = true;
7607           break;
7608         }
7609         if (isa<ShuffleVectorInst>(In) && isa<ShuffleVectorInst>(V) &&
7610             GatherShuffleSeq.contains(V) &&
7611             IsIdenticalOrLessDefined(V, &In, NewMask) &&
7612             DT->dominates(In.getParent(), V->getParent())) {
7613           In.moveAfter(V);
7614           V->replaceAllUsesWith(&In);
7615           eraseInstruction(V);
7616           if (auto *SI = dyn_cast<ShuffleVectorInst>(&In))
7617             if (!NewMask.empty())
7618               SI->setShuffleMask(NewMask);
7619           V = &In;
7620           Replaced = true;
7621           break;
7622         }
7623       }
7624       if (!Replaced) {
7625         assert(!is_contained(Visited, &In));
7626         Visited.push_back(&In);
7627       }
7628     }
7629   }
7630   CSEBlocks.clear();
7631   GatherShuffleSeq.clear();
7632 }
7633 
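// Builds a bundle out of the ScheduleData entries of the given scalars. The
// members are linked through NextInBundle and all point back to the first
// member through FirstInBundle, e.g. (illustrative):
//
//   SD0 --NextInBundle--> SD1 --NextInBundle--> SD2 --> null
//   (FirstInBundle of each member points to SD0.)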
7634 BoUpSLP::ScheduleData *
7635 BoUpSLP::BlockScheduling::buildBundle(ArrayRef<Value *> VL) {
7636   ScheduleData *Bundle = nullptr;
7637   ScheduleData *PrevInBundle = nullptr;
7638   for (Value *V : VL) {
7639     ScheduleData *BundleMember = getScheduleData(V);
7640     assert(BundleMember &&
7641            "no ScheduleData for bundle member "
7642            "(maybe not in same basic block)");
7643     assert(BundleMember->isSchedulingEntity() &&
7644            "bundle member already part of other bundle");
7645     if (PrevInBundle) {
7646       PrevInBundle->NextInBundle = BundleMember;
7647     } else {
7648       Bundle = BundleMember;
7649     }
7650 
    // Group the instructions into a bundle.
7652     BundleMember->FirstInBundle = Bundle;
7653     PrevInBundle = BundleMember;
7654   }
7655   assert(Bundle && "Failed to find schedule bundle");
7656   return Bundle;
7657 }
7658 
// Groups the instructions into a bundle (which is then a single scheduling
// entity) and schedules instructions until the bundle gets ready.
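// Returns nullptr if the bundle does not need scheduling at all (PHIs and
// vector-like instructions with constant operands), None if scheduling the
// bundle failed, and the bundle's ScheduleData otherwise.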
7661 Optional<BoUpSLP::ScheduleData *>
7662 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
7663                                             const InstructionsState &S) {
7664   // No need to schedule PHIs, insertelement, extractelement and extractvalue
7665   // instructions.
7666   if (isa<PHINode>(S.OpValue) || isVectorLikeInstWithConstOps(S.OpValue))
7667     return nullptr;
7668 
7669   // Initialize the instruction bundle.
7670   Instruction *OldScheduleEnd = ScheduleEnd;
7671   LLVM_DEBUG(dbgs() << "SLP:  bundle: " << *S.OpValue << "\n");
7672 
7673   auto TryScheduleBundleImpl = [this, OldScheduleEnd, SLP](bool ReSchedule,
7674                                                          ScheduleData *Bundle) {
    // The scheduling region got new instructions at the lower end (or it is a
    // new region for the first bundle). This makes it necessary to
    // recalculate all dependencies.
    // This seldom needs to be done a second time after adding the initial
    // bundle to the region.
7680     if (ScheduleEnd != OldScheduleEnd) {
7681       for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode())
7682         doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); });
7683       ReSchedule = true;
7684     }
7685     if (Bundle) {
7686       LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle
7687                         << " in block " << BB->getName() << "\n");
7688       calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP);
7689     }
7690 
7691     if (ReSchedule) {
7692       resetSchedule();
7693       initialFillReadyList(ReadyInsts);
7694     }
7695 
    // Now try to schedule the new bundle or (if no bundle) just calculate
    // dependencies. As soon as the bundle is "ready" it means that there are
    // no cyclic dependencies and we can schedule it. Note that it's important
    // that we don't "schedule" the bundle yet (see cancelScheduling).
7700     while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) &&
7701            !ReadyInsts.empty()) {
7702       ScheduleData *Picked = ReadyInsts.pop_back_val();
7703       assert(Picked->isSchedulingEntity() && Picked->isReady() &&
7704              "must be ready to schedule");
7705       schedule(Picked, ReadyInsts);
7706     }
7707   };
7708 
7709   // Make sure that the scheduling region contains all
7710   // instructions of the bundle.
7711   for (Value *V : VL) {
7712     if (!extendSchedulingRegion(V, S)) {
      // The scheduling region got new instructions at the lower end (or it is
      // a new region for the first bundle), which makes it necessary to
      // recalculate all dependencies.
      // Otherwise the compiler may crash trying to incorrectly calculate
      // dependencies and emit instructions in the wrong order during the
      // actual scheduling.
7719       TryScheduleBundleImpl(/*ReSchedule=*/false, nullptr);
7720       return None;
7721     }
7722   }
7723 
7724   bool ReSchedule = false;
7725   for (Value *V : VL) {
7726     ScheduleData *BundleMember = getScheduleData(V);
7727     assert(BundleMember &&
7728            "no ScheduleData for bundle member (maybe not in same basic block)");
7729 
    // Make sure we don't leave the pieces of the bundle in the ready list
    // when the whole bundle might not be ready.
7732     ReadyInsts.remove(BundleMember);
7733 
7734     if (!BundleMember->IsScheduled)
7735       continue;
7736     // A bundle member was scheduled as single instruction before and now
7737     // needs to be scheduled as part of the bundle. We just get rid of the
7738     // existing schedule.
7739     LLVM_DEBUG(dbgs() << "SLP:  reset schedule because " << *BundleMember
7740                       << " was already scheduled\n");
7741     ReSchedule = true;
7742   }
7743 
7744   auto *Bundle = buildBundle(VL);
7745   TryScheduleBundleImpl(ReSchedule, Bundle);
7746   if (!Bundle->isReady()) {
7747     cancelScheduling(VL, S.OpValue);
7748     return None;
7749   }
7750   return Bundle;
7751 }
7752 
7753 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
7754                                                 Value *OpValue) {
7755   if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue))
7756     return;
7757 
7758   ScheduleData *Bundle = getScheduleData(OpValue);
7759   LLVM_DEBUG(dbgs() << "SLP:  cancel scheduling of " << *Bundle << "\n");
7760   assert(!Bundle->IsScheduled &&
7761          "Can't cancel bundle which is already scheduled");
7762   assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
7763          "tried to unbundle something which is not a bundle");
7764 
7765   // Remove the bundle from the ready list.
7766   if (Bundle->isReady())
7767     ReadyInsts.remove(Bundle);
7768 
7769   // Un-bundle: make single instructions out of the bundle.
7770   ScheduleData *BundleMember = Bundle;
7771   while (BundleMember) {
7772     assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
7773     BundleMember->FirstInBundle = BundleMember;
7774     ScheduleData *Next = BundleMember->NextInBundle;
7775     BundleMember->NextInBundle = nullptr;
7776     if (BundleMember->unscheduledDepsInBundle() == 0) {
7777       ReadyInsts.insert(BundleMember);
7778     }
7779     BundleMember = Next;
7780   }
7781 }
7782 
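// ScheduleData objects are handed out from fixed-size chunks so that pointers
// to them (stored in the schedule data maps and in the bundle links) remain
// stable and allocations are amortized across many instructions.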
7783 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
7784   // Allocate a new ScheduleData for the instruction.
7785   if (ChunkPos >= ChunkSize) {
7786     ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize));
7787     ChunkPos = 0;
7788   }
7789   return &(ScheduleDataChunks.back()[ChunkPos++]);
7790 }
7791 
7792 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V,
7793                                                       const InstructionsState &S) {
7794   if (getScheduleData(V, isOneOf(S, V)))
7795     return true;
7796   Instruction *I = dyn_cast<Instruction>(V);
7797   assert(I && "bundle member must be an instruction");
7798   assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) &&
7799          "phi nodes/insertelements/extractelements/extractvalues don't need to "
7800          "be scheduled");
7801   auto &&CheckScheduleForI = [this, &S](Instruction *I) -> bool {
7802     ScheduleData *ISD = getScheduleData(I);
7803     if (!ISD)
7804       return false;
7805     assert(isInSchedulingRegion(ISD) &&
7806            "ScheduleData not in scheduling region");
7807     ScheduleData *SD = allocateScheduleDataChunks();
7808     SD->Inst = I;
7809     SD->init(SchedulingRegionID, S.OpValue);
7810     ExtraScheduleDataMap[I][S.OpValue] = SD;
7811     return true;
7812   };
7813   if (CheckScheduleForI(I))
7814     return true;
7815   if (!ScheduleStart) {
7816     // It's the first instruction in the new region.
7817     initScheduleData(I, I->getNextNode(), nullptr, nullptr);
7818     ScheduleStart = I;
7819     ScheduleEnd = I->getNextNode();
7820     if (isOneOf(S, I) != I)
7821       CheckScheduleForI(I);
7822     assert(ScheduleEnd && "tried to vectorize a terminator?");
7823     LLVM_DEBUG(dbgs() << "SLP:  initialize schedule region to " << *I << "\n");
7824     return true;
7825   }
7826   // Search up and down at the same time, because we don't know if the new
7827   // instruction is above or below the existing scheduling region.
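  // Illustration: the region currently covers [ScheduleStart, ScheduleEnd)
  // and I may lie on either side of it within BB:
  //
  //     ...            <- UpIter walks upwards from ScheduleStart
  //     ScheduleStart
  //     ...
  //     ScheduleEnd
  //     ...            <- DownIter walks downwards from ScheduleEnd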
7828   BasicBlock::reverse_iterator UpIter =
7829       ++ScheduleStart->getIterator().getReverse();
7830   BasicBlock::reverse_iterator UpperEnd = BB->rend();
7831   BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
7832   BasicBlock::iterator LowerEnd = BB->end();
7833   while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I &&
7834          &*DownIter != I) {
7835     if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
7836       LLVM_DEBUG(dbgs() << "SLP:  exceeded schedule region size limit\n");
7837       return false;
7838     }
7839 
7840     ++UpIter;
7841     ++DownIter;
7842   }
7843   if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) {
7844     assert(I->getParent() == ScheduleStart->getParent() &&
7845            "Instruction is in wrong basic block.");
7846     initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
7847     ScheduleStart = I;
7848     if (isOneOf(S, I) != I)
7849       CheckScheduleForI(I);
7850     LLVM_DEBUG(dbgs() << "SLP:  extend schedule region start to " << *I
7851                       << "\n");
7852     return true;
7853   }
  assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) &&
         "Expected to reach the top of the basic block or the instruction at "
         "the lower end.");
7857   assert(I->getParent() == ScheduleEnd->getParent() &&
7858          "Instruction is in wrong basic block.");
7859   initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
7860                    nullptr);
7861   ScheduleEnd = I->getNextNode();
7862   if (isOneOf(S, I) != I)
7863     CheckScheduleForI(I);
7864   assert(ScheduleEnd && "tried to vectorize a terminator?");
7865   LLVM_DEBUG(dbgs() << "SLP:  extend schedule region end to " << *I << "\n");
7866   return true;
7867 }
7868 
7869 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
7870                                                 Instruction *ToI,
7871                                                 ScheduleData *PrevLoadStore,
7872                                                 ScheduleData *NextLoadStore) {
7873   ScheduleData *CurrentLoadStore = PrevLoadStore;
7874   for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
7875     ScheduleData *SD = ScheduleDataMap[I];
7876     if (!SD) {
7877       SD = allocateScheduleDataChunks();
7878       ScheduleDataMap[I] = SD;
7879       SD->Inst = I;
7880     }
7881     assert(!isInSchedulingRegion(SD) &&
7882            "new ScheduleData already in scheduling region");
7883     SD->init(SchedulingRegionID, I);
7884 
7885     if (I->mayReadOrWriteMemory() &&
7886         (!isa<IntrinsicInst>(I) ||
7887          (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect &&
7888           cast<IntrinsicInst>(I)->getIntrinsicID() !=
7889               Intrinsic::pseudoprobe))) {
7890       // Update the linked list of memory accessing instructions.
7891       if (CurrentLoadStore) {
7892         CurrentLoadStore->NextLoadStore = SD;
7893       } else {
7894         FirstLoadStoreInRegion = SD;
7895       }
7896       CurrentLoadStore = SD;
7897     }
7898   }
7899   if (NextLoadStore) {
7900     if (CurrentLoadStore)
7901       CurrentLoadStore->NextLoadStore = NextLoadStore;
7902   } else {
7903     LastLoadStoreInRegion = CurrentLoadStore;
7904   }
7905 }
7906 
7907 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
7908                                                      bool InsertInReadyList,
7909                                                      BoUpSLP *SLP) {
7910   assert(SD->isSchedulingEntity());
7911 
7912   SmallVector<ScheduleData *, 10> WorkList;
7913   WorkList.push_back(SD);
7914 
7915   while (!WorkList.empty()) {
7916     ScheduleData *SD = WorkList.pop_back_val();
7917     for (ScheduleData *BundleMember = SD; BundleMember;
7918          BundleMember = BundleMember->NextInBundle) {
7919       assert(isInSchedulingRegion(BundleMember));
7920       if (BundleMember->hasValidDependencies())
7921         continue;
7922 
7923       LLVM_DEBUG(dbgs() << "SLP:       update deps of " << *BundleMember
7924                  << "\n");
7925       BundleMember->Dependencies = 0;
7926       BundleMember->resetUnscheduledDeps();
7927 
7928       // Handle def-use chain dependencies.
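      // Two cases below: a ScheduleData representing an instruction under a
      // different key value (OpValue != Inst, stored in ExtraScheduleDataMap)
      // depends only on the instruction's own ScheduleData; otherwise every
      // in-region user of the instruction contributes a dependency.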
7929       if (BundleMember->OpValue != BundleMember->Inst) {
7930         if (ScheduleData *UseSD = getScheduleData(BundleMember->Inst)) {
7931           BundleMember->Dependencies++;
7932           ScheduleData *DestBundle = UseSD->FirstInBundle;
7933           if (!DestBundle->IsScheduled)
7934             BundleMember->incrementUnscheduledDeps(1);
7935           if (!DestBundle->hasValidDependencies())
7936             WorkList.push_back(DestBundle);
7937         }
7938       } else {
7939         for (User *U : BundleMember->Inst->users()) {
7940           if (ScheduleData *UseSD = getScheduleData(cast<Instruction>(U))) {
7941             BundleMember->Dependencies++;
7942             ScheduleData *DestBundle = UseSD->FirstInBundle;
7943             if (!DestBundle->IsScheduled)
7944               BundleMember->incrementUnscheduledDeps(1);
7945             if (!DestBundle->hasValidDependencies())
7946               WorkList.push_back(DestBundle);
7947           }
7948         }
7949       }
7950 
7951       // Handle the memory dependencies (if any).
7952       ScheduleData *DepDest = BundleMember->NextLoadStore;
7953       if (!DepDest)
7954         continue;
7955       Instruction *SrcInst = BundleMember->Inst;
      assert(SrcInst->mayReadOrWriteMemory() &&
             "NextLoadStore list for non-memory-affecting bundle?");
7958       MemoryLocation SrcLoc = getLocation(SrcInst);
7959       bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
7960       unsigned numAliased = 0;
7961       unsigned DistToSrc = 1;
7962 
      for (; DepDest; DepDest = DepDest->NextLoadStore) {
7964         assert(isInSchedulingRegion(DepDest));
7965 
7966         // We have two limits to reduce the complexity:
7967         // 1) AliasedCheckLimit: It's a small limit to reduce calls to
7968         //    SLP->isAliased (which is the expensive part in this loop).
7969         // 2) MaxMemDepDistance: It's for very large blocks and it aborts
7970         //    the whole loop (even if the loop is fast, it's quadratic).
7971         //    It's important for the loop break condition (see below) to
7972         //    check this limit even between two read-only instructions.
7973         if (DistToSrc >= MaxMemDepDistance ||
7974             ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
7975              (numAliased >= AliasedCheckLimit ||
7976               SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
7977 
7978           // We increment the counter only if the locations are aliased
7979           // (instead of counting all alias checks). This gives a better
7980           // balance between reduced runtime and accurate dependencies.
7981           numAliased++;
7982 
7983           DepDest->MemoryDependencies.push_back(BundleMember);
7984           BundleMember->Dependencies++;
7985           ScheduleData *DestBundle = DepDest->FirstInBundle;
7986           if (!DestBundle->IsScheduled) {
7987             BundleMember->incrementUnscheduledDeps(1);
7988           }
7989           if (!DestBundle->hasValidDependencies()) {
7990             WorkList.push_back(DestBundle);
7991           }
7992         }
7993 
7994         // Example, explaining the loop break condition: Let's assume our
7995         // starting instruction is i0 and MaxMemDepDistance = 3.
7996         //
7997         //                      +--------v--v--v
7998         //             i0,i1,i2,i3,i4,i5,i6,i7,i8
7999         //             +--------^--^--^
8000         //
        // MaxMemDepDistance lets us stop alias-checking at i3 and we add
        // dependencies from i0 to i3,i4,.. (even if they are not aliased).
8003         // Previously we already added dependencies from i3 to i6,i7,i8
8004         // (because of MaxMemDepDistance). As we added a dependency from
8005         // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
8006         // and we can abort this loop at i6.
8007         if (DistToSrc >= 2 * MaxMemDepDistance)
8008           break;
8009         DistToSrc++;
8010       }
8011     }
8012     if (InsertInReadyList && SD->isReady()) {
8013       ReadyInsts.insert(SD);
8014       LLVM_DEBUG(dbgs() << "SLP:     gets ready on update: " << *SD->Inst
8015                         << "\n");
8016     }
8017   }
8018 }
8019 
8020 void BoUpSLP::BlockScheduling::resetSchedule() {
8021   assert(ScheduleStart &&
8022          "tried to reset schedule on block which has not been scheduled");
8023   for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
8024     doForAllOpcodes(I, [&](ScheduleData *SD) {
8025       assert(isInSchedulingRegion(SD) &&
8026              "ScheduleData not in scheduling region");
8027       SD->IsScheduled = false;
8028       SD->resetUnscheduledDeps();
8029     });
8030   }
8031   ReadyInsts.clear();
8032 }
8033 
8034 void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
8035   if (!BS->ScheduleStart)
8036     return;
8037 
8038   LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
8039 
8040   BS->resetSchedule();
8041 
  // For the real scheduling we use a more sophisticated ready-list: it is
  // sorted by the original instruction location. This lets the final schedule
  // be as close as possible to the original instruction order.
8045   struct ScheduleDataCompare {
8046     bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
8047       return SD2->SchedulingPriority < SD1->SchedulingPriority;
8048     }
8049   };
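  // Note that *ReadyInsts.begin() is the ready entity with the highest
  // SchedulingPriority, i.e. the one closest to the original lower end of the
  // region; since instructions are placed bottom-up (moved before
  // LastScheduledInst), picking it first keeps the original order.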
8050   std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
8051 
8052   // Ensure that all dependency data is updated and fill the ready-list with
8053   // initial instructions.
8054   int Idx = 0;
8055   int NumToSchedule = 0;
8056   for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
8057        I = I->getNextNode()) {
8058     BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) {
8059       assert((isVectorLikeInstWithConstOps(SD->Inst) ||
8060               SD->isPartOfBundle() == (getTreeEntry(SD->Inst) != nullptr)) &&
8061              "scheduler and vectorizer bundle mismatch");
8062       SD->FirstInBundle->SchedulingPriority = Idx++;
8063       if (SD->isSchedulingEntity()) {
        BS->calculateDependencies(SD, /*InsertInReadyList=*/false, this);
8065         NumToSchedule++;
8066       }
8067     });
8068   }
8069   BS->initialFillReadyList(ReadyInsts);
8070 
8071   Instruction *LastScheduledInst = BS->ScheduleEnd;
8072   MemoryAccess *MemInsertPt = nullptr;
8073   if (MSSA) {
8074     for (auto I = LastScheduledInst->getIterator(); I != BS->BB->end(); I++) {
8075       if (auto *Access = MSSA->getMemoryAccess(&*I)) {
8076         MemInsertPt = Access;
8077         break;
8078       }
8079     }
8080   }
8081 
8082   // Do the "real" scheduling.
8083   while (!ReadyInsts.empty()) {
8084     ScheduleData *picked = *ReadyInsts.begin();
8085     ReadyInsts.erase(ReadyInsts.begin());
8086 
8087     // Move the scheduled instruction(s) to their dedicated places, if not
8088     // there yet.
8089     for (ScheduleData *BundleMember = picked; BundleMember;
8090          BundleMember = BundleMember->NextInBundle) {
8091       Instruction *pickedInst = BundleMember->Inst;
8092       if (pickedInst->getNextNode() != LastScheduledInst) {
8093         pickedInst->moveBefore(LastScheduledInst);
8094         if (MSSA) {
8095           MemorySSAUpdater MSSAU(MSSA);
8096           if (auto *Access = MSSA->getMemoryAccess(pickedInst)) {
8097             if (MemInsertPt)
8098               MSSAU.moveBefore(Access, cast<MemoryUseOrDef>(MemInsertPt));
8099             else
8100               MSSAU.moveToPlace(Access, BS->BB,
8101                                 MemorySSA::InsertionPlace::End);
8102           }
8103         }
8104       }
8105 
8106       LastScheduledInst = pickedInst;
8107       if (MSSA)
8108         if (auto *Access = MSSA->getMemoryAccess(LastScheduledInst))
8109           MemInsertPt = Access;
8110     }
8111 
8112     BS->schedule(picked, ReadyInsts);
8113     NumToSchedule--;
8114   }
8115   assert(NumToSchedule == 0 && "could not schedule all instructions");
8116 
8117   // Check that we didn't break any of our invariants.
8118 #ifdef EXPENSIVE_CHECKS
8119   BS->verify();
8120 #endif
8121 
8122 #if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
  // Check that all schedulable entities got scheduled.
  for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
       I = I->getNextNode()) {
8125     BS->doForAllOpcodes(I, [&](ScheduleData *SD) {
8126       if (SD->isSchedulingEntity() && SD->hasValidDependencies()) {
8127         assert(SD->IsScheduled && "must be scheduled at this point");
8128       }
8129     });
8130   }
8131 #endif
8132 
8133   // Avoid duplicate scheduling of the block.
8134   BS->ScheduleStart = nullptr;
8135 }
8136 
8137 unsigned BoUpSLP::getVectorElementSize(Value *V) {
8138   // If V is a store, just return the width of the stored value (or value
8139   // truncated just before storing) without traversing the expression tree.
8140   // This is the common case.
8141   if (auto *Store = dyn_cast<StoreInst>(V)) {
8142     if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand()))
8143       return DL->getTypeSizeInBits(Trunc->getSrcTy());
8144     return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
8145   }
8146 
8147   if (auto *IEI = dyn_cast<InsertElementInst>(V))
8148     return getVectorElementSize(IEI->getOperand(1));
8149 
8150   auto E = InstrElementSize.find(V);
8151   if (E != InstrElementSize.end())
8152     return E->second;
8153 
8154   // If V is not a store, we can traverse the expression tree to find loads
8155   // that feed it. The type of the loaded value may indicate a more suitable
8156   // width than V's type. We want to base the vector element size on the width
8157   // of memory operations where possible.
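  // For example (illustrative IR): in
  //   %a = load i16, i16* %p
  //   %b = sext i16 %a to i32
  //   %c = add i32 %b, %d
  // the add has type i32, but the expression is seeded by an i16 load, so 16
  // bits is a better basis for the vector element size.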
8158   SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist;
8159   SmallPtrSet<Instruction *, 16> Visited;
8160   if (auto *I = dyn_cast<Instruction>(V)) {
8161     Worklist.emplace_back(I, I->getParent());
8162     Visited.insert(I);
8163   }
8164 
8165   // Traverse the expression tree in bottom-up order looking for loads. If we
8166   // encounter an instruction we don't yet handle, we give up.
8167   auto Width = 0u;
8168   while (!Worklist.empty()) {
8169     Instruction *I;
8170     BasicBlock *Parent;
8171     std::tie(I, Parent) = Worklist.pop_back_val();
8172 
8173     // We should only be looking at scalar instructions here. If the current
8174     // instruction has a vector type, skip.
8175     auto *Ty = I->getType();
8176     if (isa<VectorType>(Ty))
8177       continue;
8178 
    // If the current instruction is a load (or an extractelement or
    // extractvalue), update Width to reflect the width of the produced value.
8181     if (isa<LoadInst>(I) || isa<ExtractElementInst>(I) ||
8182         isa<ExtractValueInst>(I))
8183       Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty));
8184 
    // Otherwise, we need to visit the operands of the instruction. We only
    // handle the interesting cases from buildTree here. If an operand is an
    // instruction we haven't yet visited and it is either in the same basic
    // block as the user or the user is a PHI node, we add it to the worklist.
8189     else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
8190              isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I) ||
8191              isa<UnaryOperator>(I)) {
8192       for (Use &U : I->operands())
8193         if (auto *J = dyn_cast<Instruction>(U.get()))
8194           if (Visited.insert(J).second &&
8195               (isa<PHINode>(I) || J->getParent() == Parent))
8196             Worklist.emplace_back(J, J->getParent());
8197     } else {
8198       break;
8199     }
8200   }
8201 
8202   // If we didn't encounter a memory access in the expression tree, or if we
8203   // gave up for some reason, just return the width of V. Otherwise, return the
8204   // maximum width we found.
8205   if (!Width) {
8206     if (auto *CI = dyn_cast<CmpInst>(V))
8207       V = CI->getOperand(0);
8208     Width = DL->getTypeSizeInBits(V->getType());
8209   }
8210 
8211   for (Instruction *I : Visited)
8212     InstrElementSize[I] = Width;
8213 
8214   return Width;
8215 }
8216 
8217 // Determine if a value V in a vectorizable expression Expr can be demoted to a
8218 // smaller type with a truncation. We collect the values that will be demoted
8219 // in ToDemote and additional roots that require investigating in Roots.
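// For example (illustrative): for a root %t = trunc i32 %a to i8, %t is
// demotable and %a is recorded in Roots for further investigation, while an
// add inside the expression is demotable only if both of its operands can
// themselves be demoted.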
8220 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
8221                                   SmallVectorImpl<Value *> &ToDemote,
8222                                   SmallVectorImpl<Value *> &Roots) {
8223   // We can always demote constants.
8224   if (isa<Constant>(V)) {
8225     ToDemote.push_back(V);
8226     return true;
8227   }
8228 
  // If the value is not an instruction in the expression, or if it has more
  // than one use, it cannot be demoted.
8231   auto *I = dyn_cast<Instruction>(V);
8232   if (!I || !I->hasOneUse() || !Expr.count(I))
8233     return false;
8234 
8235   switch (I->getOpcode()) {
8236 
8237   // We can always demote truncations and extensions. Since truncations can
8238   // seed additional demotion, we save the truncated value.
8239   case Instruction::Trunc:
8240     Roots.push_back(I->getOperand(0));
8241     break;
8242   case Instruction::ZExt:
8243   case Instruction::SExt:
8244     if (isa<ExtractElementInst>(I->getOperand(0)) ||
8245         isa<InsertElementInst>(I->getOperand(0)))
8246       return false;
8247     break;
8248 
8249   // We can demote certain binary operations if we can demote both of their
8250   // operands.
8251   case Instruction::Add:
8252   case Instruction::Sub:
8253   case Instruction::Mul:
8254   case Instruction::And:
8255   case Instruction::Or:
8256   case Instruction::Xor:
8257     if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
8258         !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
8259       return false;
8260     break;
8261 
8262   // We can demote selects if we can demote their true and false values.
8263   case Instruction::Select: {
8264     SelectInst *SI = cast<SelectInst>(I);
8265     if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
8266         !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
8267       return false;
8268     break;
8269   }
8270 
8271   // We can demote phis if we can demote all their incoming operands. Note that
8272   // we don't need to worry about cycles since we ensure single use above.
8273   case Instruction::PHI: {
8274     PHINode *PN = cast<PHINode>(I);
8275     for (Value *IncValue : PN->incoming_values())
8276       if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
8277         return false;
8278     break;
8279   }
8280 
8281   // Otherwise, conservatively give up.
8282   default:
8283     return false;
8284   }
8285 
8286   // Record the value that we can demote.
8287   ToDemote.push_back(V);
8288   return true;
8289 }
8290 
8291 void BoUpSLP::computeMinimumValueSizes() {
8292   // If there are no external uses, the expression tree must be rooted by a
8293   // store. We can't demote in-memory values, so there is nothing to do here.
8294   if (ExternalUses.empty())
8295     return;
8296 
8297   // We only attempt to truncate integer expressions.
8298   auto &TreeRoot = VectorizableTree[0]->Scalars;
8299   auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
8300   if (!TreeRootIT)
8301     return;
8302 
8303   // If the expression is not rooted by a store, these roots should have
8304   // external uses. We will rely on InstCombine to rewrite the expression in
8305   // the narrower type. However, InstCombine only rewrites single-use values.
8306   // This means that if a tree entry other than a root is used externally, it
8307   // must have multiple uses and InstCombine will not rewrite it. The code
8308   // below ensures that only the roots are used externally.
8309   SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
8310   for (auto &EU : ExternalUses)
8311     if (!Expr.erase(EU.Scalar))
8312       return;
8313   if (!Expr.empty())
8314     return;
8315 
8316   // Collect the scalar values of the vectorizable expression. We will use this
8317   // context to determine which values can be demoted. If we see a truncation,
8318   // we mark it as seeding another demotion.
8319   for (auto &EntryPtr : VectorizableTree)
8320     Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end());
8321 
8322   // Ensure the roots of the vectorizable tree don't form a cycle. They must
8323   // have a single external user that is not in the vectorizable tree.
8324   for (auto *Root : TreeRoot)
8325     if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
8326       return;
8327 
8328   // Conservatively determine if we can actually truncate the roots of the
8329   // expression. Collect the values that can be demoted in ToDemote and
8330   // additional roots that require investigating in Roots.
8331   SmallVector<Value *, 32> ToDemote;
8332   SmallVector<Value *, 4> Roots;
8333   for (auto *Root : TreeRoot)
8334     if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
8335       return;
8336 
8337   // The maximum bit width required to represent all the values that can be
8338   // demoted without loss of precision. It would be safe to truncate the roots
8339   // of the expression to this width.
8340   auto MaxBitWidth = 8u;
8341 
8342   // We first check if all the bits of the roots are demanded. If they're not,
8343   // we can truncate the roots to this narrower type.
8344   for (auto *Root : TreeRoot) {
8345     auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
8346     MaxBitWidth = std::max<unsigned>(
8347         Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
8348   }
8349 
8350   // True if the roots can be zero-extended back to their original type, rather
8351   // than sign-extended. We know that if the leading bits are not demanded, we
8352   // can safely zero-extend. So we initialize IsKnownPositive to True.
8353   bool IsKnownPositive = true;
8354 
8355   // If all the bits of the roots are demanded, we can try a little harder to
8356   // compute a narrower type. This can happen, for example, if the roots are
8357   // getelementptr indices. InstCombine promotes these indices to the pointer
8358   // width. Thus, all their bits are technically demanded even though the
8359   // address computation might be vectorized in a smaller type.
8360   //
8361   // We start by looking at each entry that can be demoted. We compute the
8362   // maximum bit width required to store the scalar by using ValueTracking to
8363   // compute the number of high-order bits we can truncate.
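  // For example (hypothetical values): for an i64 scalar with
  // ComputeNumSignBits == 57, only 64 - 57 = 7 low bits carry information,
  // so such a scalar leaves MaxBitWidth at the minimum of 8.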
8364   if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
8365       llvm::all_of(TreeRoot, [](Value *R) {
8366         assert(R->hasOneUse() && "Root should have only one use!");
8367         return isa<GetElementPtrInst>(R->user_back());
8368       })) {
8369     MaxBitWidth = 8u;
8370 
8371     // Determine if the sign bit of all the roots is known to be zero. If not,
8372     // IsKnownPositive is set to False.
8373     IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
8374       KnownBits Known = computeKnownBits(R, *DL);
8375       return Known.isNonNegative();
8376     });
8377 
8378     // Determine the maximum number of bits required to store the scalar
8379     // values.
8380     for (auto *Scalar : ToDemote) {
8381       auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
8382       auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
8383       MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
8384     }
8385 
8386     // If we can't prove that the sign bit is zero, we must add one to the
8387     // maximum bit width to account for the unknown sign bit. This preserves
8388     // the existing sign bit so we can safely sign-extend the root back to the
8389     // original type. Otherwise, if we know the sign bit is zero, we will
8390     // zero-extend the root instead.
8391     //
8392     // FIXME: This is somewhat suboptimal, as there will be cases where adding
8393     //        one to the maximum bit width will yield a larger-than-necessary
8394     //        type. In general, we need to add an extra bit only if we can't
8395     //        prove that the upper bit of the original type is equal to the
8396     //        upper bit of the proposed smaller type. If these two bits are the
8397     //        same (either zero or one) we know that sign-extending from the
8398     //        smaller type will result in the same value. Here, since we can't
8399     //        yet prove this, we are just making the proposed smaller type
8400     //        larger to ensure correctness.
8401     if (!IsKnownPositive)
8402       ++MaxBitWidth;
8403   }
8404 
8405   // Round MaxBitWidth up to the next power-of-two.
8406   if (!isPowerOf2_64(MaxBitWidth))
8407     MaxBitWidth = NextPowerOf2(MaxBitWidth);
8408 
  // If the maximum bit width we compute is less than the width of the roots'
  // type, we can proceed with the narrowing. Otherwise, do nothing.
8411   if (MaxBitWidth >= TreeRootIT->getBitWidth())
8412     return;
8413 
8414   // If we can truncate the root, we must collect additional values that might
8415   // be demoted as a result. That is, those seeded by truncations we will
8416   // modify.
8417   while (!Roots.empty())
8418     collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
8419 
  // Finally, map the values we can demote to the maximum bit width we
  // computed.
8421   for (auto *Scalar : ToDemote)
8422     MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
8423 }
8424 
8425 namespace {
8426 
8427 /// The SLPVectorizer Pass.
8428 struct SLPVectorizer : public FunctionPass {
8429   SLPVectorizerPass Impl;
8430 
8431   /// Pass identification, replacement for typeid
8432   static char ID;
8433 
8434   explicit SLPVectorizer() : FunctionPass(ID) {
8435     initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
8436   }
8437 
8438   bool doInitialization(Module &M) override { return false; }
8439 
8440   bool runOnFunction(Function &F) override {
8441     if (skipFunction(F))
8442       return false;
8443 
8444     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
8445     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
8446     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
8447     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
8448     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
8449     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
8450     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
8451     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
8452     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
8453     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
8454 
    return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB,
                        /*MSSA=*/nullptr, ORE);
8456   }
8457 
8458   void getAnalysisUsage(AnalysisUsage &AU) const override {
8459     FunctionPass::getAnalysisUsage(AU);
8460     AU.addRequired<AssumptionCacheTracker>();
8461     AU.addRequired<ScalarEvolutionWrapperPass>();
8462     AU.addRequired<AAResultsWrapperPass>();
8463     AU.addRequired<TargetTransformInfoWrapperPass>();
8464     AU.addRequired<LoopInfoWrapperPass>();
8465     AU.addRequired<DominatorTreeWrapperPass>();
8466     AU.addRequired<DemandedBitsWrapperPass>();
8467     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
8468     AU.addRequired<InjectTLIMappingsLegacy>();
8469     AU.addPreserved<LoopInfoWrapperPass>();
8470     AU.addPreserved<DominatorTreeWrapperPass>();
8471     AU.addPreserved<AAResultsWrapperPass>();
8472     AU.addPreserved<GlobalsAAWrapperPass>();
8473     AU.setPreservesCFG();
8474   }
8475 };
8476 
8477 } // end anonymous namespace
8478 
PreservedAnalyses SLPVectorizerPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
8480   auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
8481   auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
8482   auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
8483   auto *AA = &AM.getResult<AAManager>(F);
8484   auto *LI = &AM.getResult<LoopAnalysis>(F);
8485   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
8486   auto *AC = &AM.getResult<AssumptionAnalysis>(F);
8487   auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
8488   auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  auto *MSSA = EnableMSSAInSLPVectorizer
                   ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                   : nullptr;
8491 
8492   bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, MSSA, ORE);
8493   if (!Changed)
8494     return PreservedAnalyses::all();
8495 
8496   PreservedAnalyses PA;
8497   PA.preserveSet<CFGAnalyses>();
8498   if (MSSA) {
8499 #ifdef EXPENSIVE_CHECKS
8500     MSSA->verifyMemorySSA();
8501 #endif
8502     PA.preserve<MemorySSAAnalysis>();
8503   }
8504   return PA;
8505 }
8506 
8507 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
8508                                 TargetTransformInfo *TTI_,
8509                                 TargetLibraryInfo *TLI_, AAResults *AA_,
8510                                 LoopInfo *LI_, DominatorTree *DT_,
8511                                 AssumptionCache *AC_, DemandedBits *DB_,
8512                                 MemorySSA *MSSA,
8513                                 OptimizationRemarkEmitter *ORE_) {
8514   if (!RunSLPVectorization)
8515     return false;
8516   SE = SE_;
8517   TTI = TTI_;
8518   TLI = TLI_;
8519   AA = AA_;
8520   LI = LI_;
8521   DT = DT_;
8522   AC = AC_;
8523   DB = DB_;
8524   DL = &F.getParent()->getDataLayout();
8525 
8526   Stores.clear();
8527   GEPs.clear();
8528   bool Changed = false;
8529 
8530   // If the target claims to have no vector registers don't attempt
8531   // vectorization.
8532   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) {
8533     LLVM_DEBUG(
8534         dbgs() << "SLP: Didn't find any vector registers for target, abort.\n");
8535     return false;
8536   }
8537 
8538   // Don't vectorize when the attribute NoImplicitFloat is used.
8539   if (F.hasFnAttribute(Attribute::NoImplicitFloat))
8540     return false;
8541 
8542   LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
8543 
  // Use the bottom-up SLP vectorizer to construct chains that start with
  // store instructions.
8546   BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, MSSA, DL, ORE_);
8547 
8548   // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
8549   // delete instructions.
8550 
8551   // Update DFS numbers now so that we can use them for ordering.
8552   DT->updateDFSNumbers();
8553 
8554   // Scan the blocks in the function in post order.
  for (auto *BB : post_order(&F.getEntryBlock())) {
8556     collectSeedInstructions(BB);
8557 
8558     // Vectorize trees that end at stores.
8559     if (!Stores.empty()) {
8560       LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
8561                         << " underlying objects.\n");
8562       Changed |= vectorizeStoreChains(R);
8563     }
8564 
8565     // Vectorize trees that end at reductions.
8566     Changed |= vectorizeChainsInBlock(BB, R);
8567 
8568     // Vectorize the index computations of getelementptr instructions. This
8569     // is primarily intended to catch gather-like idioms ending at
8570     // non-consecutive loads.
8571     if (!GEPs.empty()) {
8572       LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
8573                         << " underlying objects.\n");
8574       Changed |= vectorizeGEPIndices(BB, R);
8575     }
8576   }
8577 
8578   if (Changed) {
8579     R.optimizeGatherSequence();
8580     LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
8581   }
8582   return Changed;
8583 }
8584 
8585 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
8586                                             unsigned Idx) {
8587   LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size()
8588                     << "\n");
8589   const unsigned Sz = R.getVectorElementSize(Chain[0]);
8590   const unsigned MinVF = R.getMinVecRegSize() / Sz;
8591   unsigned VF = Chain.size();
8592 
8593   if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF)
8594     return false;
8595 
8596   LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx
8597                     << "\n");
8598 
8599   R.buildTree(Chain);
8600   if (R.isTreeTinyAndNotFullyVectorizable())
8601     return false;
8602   if (R.isLoadCombineCandidate())
8603     return false;
8604   R.reorderTopToBottom();
8605   R.reorderBottomToTop();
8606   R.buildExternalUses();
8607 
8608   R.computeMinimumValueSizes();
8609 
8610   InstructionCost Cost = R.getTreeCost();
8611 
  LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF = " << VF
                    << "\n");
8613   if (Cost < -SLPCostThreshold) {
8614     LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n");
8615 
8616     using namespace ore;
8617 
8618     R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
8619                                         cast<StoreInst>(Chain[0]))
8620                      << "Stores SLP vectorized with cost " << NV("Cost", Cost)
8621                      << " and with tree size "
8622                      << NV("TreeSize", R.getTreeSize()));
8623 
8624     R.vectorizeTree();
8625     return true;
8626   }
8627 
8628   return false;
8629 }
8630 
8631 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
8632                                         BoUpSLP &R) {
8633   // We may run into multiple chains that merge into a single chain. We mark the
8634   // stores that we vectorized so that we don't visit the same store twice.
8635   BoUpSLP::ValueSet VectorizedStores;
8636   bool Changed = false;
8637 
8638   int E = Stores.size();
8639   SmallBitVector Tails(E, false);
8640   int MaxIter = MaxStoreLookup.getValue();
8641   SmallVector<std::pair<int, int>, 16> ConsecutiveChain(
8642       E, std::make_pair(E, INT_MAX));
8643   SmallVector<SmallBitVector, 4> CheckedPairs(E, SmallBitVector(E, false));
8644   int IterCnt;
8645   auto &&FindConsecutiveAccess = [this, &Stores, &Tails, &IterCnt, MaxIter,
8646                                   &CheckedPairs,
8647                                   &ConsecutiveChain](int K, int Idx) {
8648     if (IterCnt >= MaxIter)
8649       return true;
8650     if (CheckedPairs[Idx].test(K))
8651       return ConsecutiveChain[K].second == 1 &&
8652              ConsecutiveChain[K].first == Idx;
8653     ++IterCnt;
8654     CheckedPairs[Idx].set(K);
8655     CheckedPairs[K].set(Idx);
8656     Optional<int> Diff = getPointersDiff(
8657         Stores[K]->getValueOperand()->getType(), Stores[K]->getPointerOperand(),
8658         Stores[Idx]->getValueOperand()->getType(),
8659         Stores[Idx]->getPointerOperand(), *DL, *SE, /*StrictCheck=*/true);
8660     if (!Diff || *Diff == 0)
8661       return false;
8662     int Val = *Diff;
8663     if (Val < 0) {
8664       if (ConsecutiveChain[Idx].second > -Val) {
8665         Tails.set(K);
8666         ConsecutiveChain[Idx] = std::make_pair(K, -Val);
8667       }
8668       return false;
8669     }
8670     if (ConsecutiveChain[K].second <= Val)
8671       return false;
8672 
8673     Tails.set(Idx);
8674     ConsecutiveChain[K] = std::make_pair(Idx, Val);
8675     return Val == 1;
8676   };
8677   // Do a quadratic search on all of the given stores in reverse order and find
8678   // all of the pairs of stores that follow each other.
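  // For example (illustrative): for stores s0, s1, s2 to p, p+1 and p+2, the
  // search records ConsecutiveChain[0] = {1, 1} and
  // ConsecutiveChain[1] = {2, 1} and marks s1 and s2 as tails, so only s0
  // starts a chain below.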
8679   for (int Idx = E - 1; Idx >= 0; --Idx) {
    // If a store has multiple consecutive store candidates, search according
    // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ...
    // This is because pairing with an immediately succeeding or preceding
    // candidate usually creates the best chance of finding an SLP
    // vectorization opportunity.
8684     const int MaxLookDepth = std::max(E - Idx, Idx + 1);
8685     IterCnt = 0;
8686     for (int Offset = 1, F = MaxLookDepth; Offset < F; ++Offset)
8687       if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) ||
8688           (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx)))
8689         break;
8690   }
8691 
8692   // Tracks if we tried to vectorize stores starting from the given tail
8693   // already.
8694   SmallBitVector TriedTails(E, false);
8695   // For stores that start but don't end a link in the chain:
8696   for (int Cnt = E; Cnt > 0; --Cnt) {
8697     int I = Cnt - 1;
8698     if (ConsecutiveChain[I].first == E || Tails.test(I))
8699       continue;
8700     // We found a store instr that starts a chain. Now follow the chain and try
8701     // to vectorize it.
8702     BoUpSLP::ValueList Operands;
8703     // Collect the chain into a list.
8704     while (I != E && !VectorizedStores.count(Stores[I])) {
8705       Operands.push_back(Stores[I]);
8706       Tails.set(I);
8707       if (ConsecutiveChain[I].second != 1) {
8708         // Mark the new end in the chain and go back, if required. It might be
8709         // required if the original stores come in reversed order, for example.
8710         if (ConsecutiveChain[I].first != E &&
8711             Tails.test(ConsecutiveChain[I].first) && !TriedTails.test(I) &&
8712             !VectorizedStores.count(Stores[ConsecutiveChain[I].first])) {
8713           TriedTails.set(I);
8714           Tails.reset(ConsecutiveChain[I].first);
8715           if (Cnt < ConsecutiveChain[I].first + 2)
8716             Cnt = ConsecutiveChain[I].first + 2;
8717         }
8718         break;
8719       }
8720       // Move to the next value in the chain.
8721       I = ConsecutiveChain[I].first;
8722     }
8723     assert(!Operands.empty() && "Expected non-empty list of stores.");
8724 
8725     unsigned MaxVecRegSize = R.getMaxVecRegSize();
8726     unsigned EltSize = R.getVectorElementSize(Operands[0]);
8727     unsigned MaxElts = llvm::PowerOf2Floor(MaxVecRegSize / EltSize);
8728 
8729     unsigned MinVF = R.getMinVF(EltSize);
8730     unsigned MaxVF = std::min(R.getMaximumVF(EltSize, Instruction::Store),
8731                               MaxElts);
8732 
8733     // FIXME: Is division-by-2 the correct step? Should we assert that the
8734     // register size is a power-of-2?
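    // For example (illustrative): with MaxVF == 8 and MinVF == 2, slices of
    // 8, then 4, then 2 consecutive stores are tried, and StartIdx skips any
    // prefix that a wider VF already vectorized.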
8735     unsigned StartIdx = 0;
8736     for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) {
8737       for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) {
8738         ArrayRef<Value *> Slice = makeArrayRef(Operands).slice(Cnt, Size);
8739         if (!VectorizedStores.count(Slice.front()) &&
8740             !VectorizedStores.count(Slice.back()) &&
8741             vectorizeStoreChain(Slice, R, Cnt)) {
8742           // Mark the vectorized stores so that we don't vectorize them again.
8743           VectorizedStores.insert(Slice.begin(), Slice.end());
8744           Changed = true;
          // If we vectorized the initial block, there is no need to try to
          // vectorize it again.
8747           if (Cnt == StartIdx)
8748             StartIdx += Size;
8749           Cnt += Size;
8750           continue;
8751         }
8752         ++Cnt;
8753       }
8754       // Check if the whole array was vectorized already - exit.
8755       if (StartIdx >= Operands.size())
8756         break;
8757     }
8758   }
8759 
8760   return Changed;
8761 }
8762 
8763 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
8764   // Initialize the collections. We will make a single pass over the block.
8765   Stores.clear();
8766   GEPs.clear();
8767 
8768   // Visit the store and getelementptr instructions in BB and organize them in
8769   // Stores and GEPs according to the underlying objects of their pointer
8770   // operands.
8771   for (Instruction &I : *BB) {
8772     // Ignore store instructions that are volatile or have a pointer operand
8773     // that doesn't point to a scalar type.
8774     if (auto *SI = dyn_cast<StoreInst>(&I)) {
8775       if (!SI->isSimple())
8776         continue;
8777       if (!isValidElementType(SI->getValueOperand()->getType()))
8778         continue;
8779       Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI);
8780     }
8781 
8782     // Ignore getelementptr instructions that have more than one index, a
8783     // constant index, or a pointer operand that doesn't point to a scalar
8784     // type.
8785     else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
8786       auto Idx = GEP->idx_begin()->get();
8787       if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
8788         continue;
8789       if (!isValidElementType(Idx->getType()))
8790         continue;
8791       if (GEP->getType()->isVectorTy())
8792         continue;
8793       GEPs[GEP->getPointerOperand()].push_back(GEP);
8794     }
8795   }
8796 }
8797 
8798 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
8799   if (!A || !B)
8800     return false;
8801   if (isa<InsertElementInst>(A) || isa<InsertElementInst>(B))
8802     return false;
8803   Value *VL[] = {A, B};
8804   return tryToVectorizeList(VL, R);
8805 }
8806 
8807 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
8808                                            bool LimitForRegisterSize) {
8809   if (VL.size() < 2)
8810     return false;
8811 
8812   LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = "
8813                     << VL.size() << ".\n");
8814 
  // Check that all of the parts are instructions of the same type; we permit
  // an alternate opcode via InstructionsState.
8817   InstructionsState S = getSameOpcode(VL);
8818   if (!S.getOpcode())
8819     return false;
8820 
8821   Instruction *I0 = cast<Instruction>(S.OpValue);
8822   // Make sure invalid types (including vector type) are rejected before
8823   // determining vectorization factor for scalar instructions.
8824   for (Value *V : VL) {
8825     Type *Ty = V->getType();
8826     if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) {
      // NOTE: the following will give the user the internal LLVM type name,
      // which may not be useful.
8829       R.getORE()->emit([&]() {
8830         std::string type_str;
8831         llvm::raw_string_ostream rso(type_str);
8832         Ty->print(rso);
8833         return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0)
8834                << "Cannot SLP vectorize list: type "
8835                << rso.str() + " is unsupported by vectorizer";
8836       });
8837       return false;
8838     }
8839   }
8840 
8841   unsigned Sz = R.getVectorElementSize(I0);
8842   unsigned MinVF = R.getMinVF(Sz);
8843   unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
8844   MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF);
8845   if (MaxVF < 2) {
8846     R.getORE()->emit([&]() {
8847       return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0)
8848              << "Cannot SLP vectorize list: vectorization factor "
8849              << "less than 2 is not supported";
8850     });
8851     return false;
8852   }
8853 
8854   bool Changed = false;
8855   bool CandidateFound = false;
8856   InstructionCost MinCost = SLPCostThreshold.getValue();
8857   Type *ScalarTy = VL[0]->getType();
8858   if (auto *IE = dyn_cast<InsertElementInst>(VL[0]))
8859     ScalarTy = IE->getOperand(1)->getType();
8860 
8861   unsigned NextInst = 0, MaxInst = VL.size();
8862   for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) {
    // No actual vectorization should happen if the number of parts is the
    // same as the provided vectorization factor (i.e. the scalar type is used
    // for the vector code during codegen).
8866     auto *VecTy = FixedVectorType::get(ScalarTy, VF);
8867     if (TTI->getNumberOfParts(VecTy) == VF)
8868       continue;
8869     for (unsigned I = NextInst; I < MaxInst; ++I) {
8870       unsigned OpsWidth = 0;
8871 
8872       if (I + VF > MaxInst)
8873         OpsWidth = MaxInst - I;
8874       else
8875         OpsWidth = VF;
8876 
8877       if (!isPowerOf2_32(OpsWidth))
8878         continue;
8879 
8880       if ((LimitForRegisterSize && OpsWidth < MaxVF) ||
8881           (VF > MinVF && OpsWidth <= VF / 2) || (VF == MinVF && OpsWidth < 2))
8882         break;
8883 
8884       ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
8885       // Check that a previous iteration of this loop did not delete the Value.
8886       if (llvm::any_of(Ops, [&R](Value *V) {
8887             auto *I = dyn_cast<Instruction>(V);
8888             return I && R.isDeleted(I);
8889           }))
8890         continue;
8891 
      LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations\n");
8894 
8895       R.buildTree(Ops);
8896       if (R.isTreeTinyAndNotFullyVectorizable())
8897         continue;
8898       R.reorderTopToBottom();
8899       R.reorderBottomToTop(!isa<InsertElementInst>(Ops.front()));
8900       R.buildExternalUses();
8901 
8902       R.computeMinimumValueSizes();
8903       InstructionCost Cost = R.getTreeCost();
8904       CandidateFound = true;
8905       MinCost = std::min(MinCost, Cost);
8906 
8907       if (Cost < -SLPCostThreshold) {
8908         LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
        R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
                                            cast<Instruction>(Ops[0]))
                         << "SLP vectorized with cost " << ore::NV("Cost", Cost)
                         << " and with tree size "
                         << ore::NV("TreeSize", R.getTreeSize()));
8914 
8915         R.vectorizeTree();
8916         // Move to the next bundle.
8917         I += VF - 1;
8918         NextInst = I + 1;
8919         Changed = true;
8920       }
8921     }
8922   }
8923 
8924   if (!Changed && CandidateFound) {
8925     R.getORE()->emit([&]() {
8926       return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0)
8927              << "List vectorization was possible but not beneficial with cost "
8928              << ore::NV("Cost", MinCost) << " >= "
8929              << ore::NV("Threshold", -SLPCostThreshold);
8930     });
8931   } else if (!Changed) {
8932     R.getORE()->emit([&]() {
8933       return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0)
8934              << "Cannot SLP vectorize list: vectorization was impossible"
8935              << " with available vectorization factors";
8936     });
8937   }
8938   return Changed;
8939 }
8940 
8941 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
8942   if (!I)
8943     return false;
8944 
8945   if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I))
8946     return false;
8947 
8948   Value *P = I->getParent();
8949 
8950   // Vectorize in current basic block only.
8951   auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
8952   auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
8953   if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
8954     return false;
8955 
8956   // Try to vectorize V.
8957   if (tryToVectorizePair(Op0, Op1, R))
8958     return true;
8959 
8960   auto *A = dyn_cast<BinaryOperator>(Op0);
8961   auto *B = dyn_cast<BinaryOperator>(Op1);
8962   // Try to skip B.
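  // For example (illustrative): if I = A + (B0 + B1) and B = B0 + B1 has a
  // single use, try pairing A with B0 or with B1 instead.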
8963   if (B && B->hasOneUse()) {
8964     auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
8965     auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
8966     if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R))
8967       return true;
8968     if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R))
8969       return true;
8970   }
8971 
8972   // Try to skip A.
8973   if (A && A->hasOneUse()) {
8974     auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
8975     auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
8976     if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R))
8977       return true;
8978     if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R))
8979       return true;
8980   }
8981   return false;
8982 }
8983 
8984 namespace {
8985 
8986 /// Model horizontal reductions.
8987 ///
8988 /// A horizontal reduction is a tree of reduction instructions that has values
8989 /// that can be put into a vector as its leaves. For example:
8990 ///
8991 /// mul mul mul mul
8992 ///  \  /    \  /
8993 ///   +       +
8994 ///    \     /
8995 ///       +
8996 /// This tree has "mul" as its leaf values and "+" as its reduction
8997 /// instructions. A reduction can feed into a store or a binary operation
8998 /// feeding a phi.
8999 ///    ...
9000 ///    \  /
9001 ///     +
9002 ///     |
9003 ///  phi +=
9004 ///
9005 ///  Or:
9006 ///    ...
9007 ///    \  /
9008 ///     +
9009 ///     |
9010 ///   *p =
9011 ///
9012 class HorizontalReduction {
9013   using ReductionOpsType = SmallVector<Value *, 16>;
9014   using ReductionOpsListType = SmallVector<ReductionOpsType, 2>;
9015   ReductionOpsListType ReductionOps;
9016   SmallVector<Value *, 32> ReducedVals;
9017   // Use a MapVector to keep the iteration order (and thus the output) stable.
9018   MapVector<Instruction *, Value *> ExtraArgs;
9019   WeakTrackingVH ReductionRoot;
9020   /// The type of reduction operation.
9021   RecurKind RdxKind;
9022 
9023   const unsigned INVALID_OPERAND_INDEX = std::numeric_limits<unsigned>::max();
9024 
9025   static bool isCmpSelMinMax(Instruction *I) {
9026     return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) &&
9027            RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I));
9028   }
9029 
9030   // And/or are potentially poison-safe logical patterns like:
9031   // select x, y, false
9032   // select x, true, y
9033   static bool isBoolLogicOp(Instruction *I) {
9034     return match(I, m_LogicalAnd(m_Value(), m_Value())) ||
9035            match(I, m_LogicalOr(m_Value(), m_Value()));
9036   }
9037 
9038   /// Checks if instruction is associative and can be vectorized.
9039   static bool isVectorizable(RecurKind Kind, Instruction *I) {
9040     if (Kind == RecurKind::None)
9041       return false;
9042 
9043     // Integer ops that map to select instructions or intrinsics are fine.
9044     if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) ||
9045         isBoolLogicOp(I))
9046       return true;
9047 
9048     if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) {
9049       // FP min/max are associative except for NaN and -0.0. We do not
9050       // have to rule out -0.0 here because the intrinsic semantics do not
9051       // specify a fixed result for it.
9052       return I->getFastMathFlags().noNaNs();
9053     }
9054 
9055     return I->isAssociative();
9056   }
9057 
9058   static Value *getRdxOperand(Instruction *I, unsigned Index) {
9059     // Poison-safe 'or' takes the form: select X, true, Y
9060     // To make that work with the normal operand processing, we skip the
9061     // true value operand.
9062     // TODO: Change the code and data structures to handle this without a hack.
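    // For example (illustrative): given %s = select i1 %x, i1 true, i1 %y
    // (a poison-safe 'or'), requesting operand 1 returns %y (operand 2)
    // rather than the constant true.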
9063     if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1)
9064       return I->getOperand(2);
9065     return I->getOperand(Index);
9066   }
9067 
9068   /// Checks if the ParentStackElem.first should be marked as a reduction
9069   /// operation with an extra argument or as an extra argument itself.
9070   void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem,
9071                     Value *ExtraArg) {
9072     if (ExtraArgs.count(ParentStackElem.first)) {
9073       ExtraArgs[ParentStackElem.first] = nullptr;
9074       // We ran into something like:
9075       // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg.
9076       // The whole ParentStackElem.first should be considered as an extra value
9077       // in this case.
9078       // Do not analyze the remaining operands of the ParentStackElem.first
9079       // instruction; the whole instruction is an extra argument.
9080       ParentStackElem.second = INVALID_OPERAND_INDEX;
9081     } else {
9082       // We ran into something like:
9083       // ParentStackElem.first += ... + ExtraArg + ...
9084       ExtraArgs[ParentStackElem.first] = ExtraArg;
9085     }
9086   }
9087 
9088   /// Creates reduction operation with the current opcode.
9089   static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS,
9090                          Value *RHS, const Twine &Name, bool UseSelect) {
9091     unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind);
9092     switch (Kind) {
9093     case RecurKind::Or:
9094       if (UseSelect &&
9095           LHS->getType() == CmpInst::makeCmpResultType(LHS->getType()))
9096         return Builder.CreateSelect(LHS, Builder.getTrue(), RHS, Name);
9097       return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
9098                                  Name);
9099     case RecurKind::And:
9100       if (UseSelect &&
9101           LHS->getType() == CmpInst::makeCmpResultType(LHS->getType()))
9102         return Builder.CreateSelect(LHS, RHS, Builder.getFalse(), Name);
9103       return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
9104                                  Name);
9105     case RecurKind::Add:
9106     case RecurKind::Mul:
9107     case RecurKind::Xor:
9108     case RecurKind::FAdd:
9109     case RecurKind::FMul:
9110       return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
9111                                  Name);
9112     case RecurKind::FMax:
9113       return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS);
9114     case RecurKind::FMin:
9115       return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS);
9116     case RecurKind::SMax:
9117       if (UseSelect) {
9118         Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name);
9119         return Builder.CreateSelect(Cmp, LHS, RHS, Name);
9120       }
9121       return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS);
9122     case RecurKind::SMin:
9123       if (UseSelect) {
9124         Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name);
9125         return Builder.CreateSelect(Cmp, LHS, RHS, Name);
9126       }
9127       return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS);
9128     case RecurKind::UMax:
9129       if (UseSelect) {
9130         Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name);
9131         return Builder.CreateSelect(Cmp, LHS, RHS, Name);
9132       }
9133       return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS);
9134     case RecurKind::UMin:
9135       if (UseSelect) {
9136         Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name);
9137         return Builder.CreateSelect(Cmp, LHS, RHS, Name);
9138       }
9139       return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS);
9140     default:
9141       llvm_unreachable("Unknown reduction operation.");
9142     }
9143   }
9144 
9145   /// Creates reduction operation with the current opcode with the IR flags
9146   /// from \p ReductionOps.
9147   static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS,
9148                          Value *RHS, const Twine &Name,
9149                          const ReductionOpsListType &ReductionOps) {
9150     bool UseSelect = ReductionOps.size() == 2 ||
9151                      // Logical or/and.
9152                      (ReductionOps.size() == 1 &&
9153                       isa<SelectInst>(ReductionOps.front().front()));
9154     assert((!UseSelect || ReductionOps.size() != 2 ||
9155             isa<SelectInst>(ReductionOps[1][0])) &&
9156            "Expected cmp + select pairs for reduction");
9157     Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect);
9158     if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) {
9159       if (auto *Sel = dyn_cast<SelectInst>(Op)) {
9160         propagateIRFlags(Sel->getCondition(), ReductionOps[0]);
9161         propagateIRFlags(Op, ReductionOps[1]);
9162         return Op;
9163       }
9164     }
9165     propagateIRFlags(Op, ReductionOps[0]);
9166     return Op;
9167   }
9168 
9169   /// Creates reduction operation with the current opcode with the IR flags
9170   /// from \p I.
9171   static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS,
9172                          Value *RHS, const Twine &Name, Instruction *I) {
9173     auto *SelI = dyn_cast<SelectInst>(I);
9174     Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, SelI != nullptr);
9175     if (SelI && RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) {
9176       if (auto *Sel = dyn_cast<SelectInst>(Op))
9177         propagateIRFlags(Sel->getCondition(), SelI->getCondition());
9178     }
9179     propagateIRFlags(Op, I);
9180     return Op;
9181   }
9182 
9183   static RecurKind getRdxKind(Instruction *I) {
9184     assert(I && "Expected instruction for reduction matching");
9185     if (match(I, m_Add(m_Value(), m_Value())))
9186       return RecurKind::Add;
9187     if (match(I, m_Mul(m_Value(), m_Value())))
9188       return RecurKind::Mul;
9189     if (match(I, m_And(m_Value(), m_Value())) ||
9190         match(I, m_LogicalAnd(m_Value(), m_Value())))
9191       return RecurKind::And;
9192     if (match(I, m_Or(m_Value(), m_Value())) ||
9193         match(I, m_LogicalOr(m_Value(), m_Value())))
9194       return RecurKind::Or;
9195     if (match(I, m_Xor(m_Value(), m_Value())))
9196       return RecurKind::Xor;
9197     if (match(I, m_FAdd(m_Value(), m_Value())))
9198       return RecurKind::FAdd;
9199     if (match(I, m_FMul(m_Value(), m_Value())))
9200       return RecurKind::FMul;
9201 
9202     if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value())))
9203       return RecurKind::FMax;
9204     if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
9205       return RecurKind::FMin;
9206 
9207     // This matches either cmp+select or intrinsics. SLP is expected to handle
9208     // either form.
9209     // TODO: If we are canonicalizing to intrinsics, we can remove several
9210     //       special-case paths that deal with selects.
9211     if (match(I, m_SMax(m_Value(), m_Value())))
9212       return RecurKind::SMax;
9213     if (match(I, m_SMin(m_Value(), m_Value())))
9214       return RecurKind::SMin;
9215     if (match(I, m_UMax(m_Value(), m_Value())))
9216       return RecurKind::UMax;
9217     if (match(I, m_UMin(m_Value(), m_Value())))
9218       return RecurKind::UMin;
9219 
9220     if (auto *Select = dyn_cast<SelectInst>(I)) {
9221       // Try harder: look for min/max pattern based on instructions producing
9222       // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2).
9223       // During the intermediate stages of SLP, it's very common to have
9224       // pattern like this (since optimizeGatherSequence is run only once
9225       // at the end):
9226       // %1 = extractelement <2 x i32> %a, i32 0
9227       // %2 = extractelement <2 x i32> %a, i32 1
9228       // %cond = icmp sgt i32 %1, %2
9229       // %3 = extractelement <2 x i32> %a, i32 0
9230       // %4 = extractelement <2 x i32> %a, i32 1
9231       // %select = select i1 %cond, i32 %3, i32 %4
9232       CmpInst::Predicate Pred;
9233       Instruction *L1;
9234       Instruction *L2;
9235 
9236       Value *LHS = Select->getTrueValue();
9237       Value *RHS = Select->getFalseValue();
9238       Value *Cond = Select->getCondition();
9239 
9240       // TODO: Support inverse predicates.
9241       if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) {
9242         if (!isa<ExtractElementInst>(RHS) ||
9243             !L2->isIdenticalTo(cast<Instruction>(RHS)))
9244           return RecurKind::None;
9245       } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) {
9246         if (!isa<ExtractElementInst>(LHS) ||
9247             !L1->isIdenticalTo(cast<Instruction>(LHS)))
9248           return RecurKind::None;
9249       } else {
9250         if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS))
9251           return RecurKind::None;
9252         if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) ||
9253             !L1->isIdenticalTo(cast<Instruction>(LHS)) ||
9254             !L2->isIdenticalTo(cast<Instruction>(RHS)))
9255           return RecurKind::None;
9256       }
9257 
9258       switch (Pred) {
9259       default:
9260         return RecurKind::None;
9261       case CmpInst::ICMP_SGT:
9262       case CmpInst::ICMP_SGE:
9263         return RecurKind::SMax;
9264       case CmpInst::ICMP_SLT:
9265       case CmpInst::ICMP_SLE:
9266         return RecurKind::SMin;
9267       case CmpInst::ICMP_UGT:
9268       case CmpInst::ICMP_UGE:
9269         return RecurKind::UMax;
9270       case CmpInst::ICMP_ULT:
9271       case CmpInst::ICMP_ULE:
9272         return RecurKind::UMin;
9273       }
9274     }
9275     return RecurKind::None;
9276   }
9277 
9278   /// Get the index of the first operand.
9279   static unsigned getFirstOperandIndex(Instruction *I) {
9280     return isCmpSelMinMax(I) ? 1 : 0;
9281   }
9282 
9283   /// Total number of operands in the reduction operation.
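  /// For example (illustrative): a cmp+select min/max
  ///   %c = icmp sgt i32 %a, %b
  ///   %m = select i1 %c, i32 %a, i32 %b
  /// has 3 operands, and its reduced values start at operand index 1 (%a).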
9284   static unsigned getNumberOfOperands(Instruction *I) {
9285     return isCmpSelMinMax(I) ? 3 : 2;
9286   }
9287 
9288   /// Checks if the instruction is in basic block \p BB.
9289   /// For a cmp+sel min/max reduction check that both ops are in \p BB.
9290   static bool hasSameParent(Instruction *I, BasicBlock *BB) {
9291     if (isCmpSelMinMax(I) || (isBoolLogicOp(I) && isa<SelectInst>(I))) {
9292       auto *Sel = cast<SelectInst>(I);
9293       auto *Cmp = dyn_cast<Instruction>(Sel->getCondition());
9294       return Sel->getParent() == BB && Cmp && Cmp->getParent() == BB;
9295     }
9296     return I->getParent() == BB;
9297   }
9298 
9299   /// Expected number of uses for reduction operations/reduced values.
9300   static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) {
9301     if (IsCmpSelMinMax) {
9302       // The SelectInst must be used exactly twice, while the condition op must
9303       // have a single use only.
9304       if (auto *Sel = dyn_cast<SelectInst>(I))
9305         return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse();
9306       return I->hasNUses(2);
9307     }
9308 
9309     // Arithmetic reduction operation must be used once only.
9310     return I->hasOneUse();
9311   }
9312 
9313   /// Initializes the list of reduction operations.
9314   void initReductionOps(Instruction *I) {
9315     if (isCmpSelMinMax(I))
9316       ReductionOps.assign(2, ReductionOpsType());
9317     else
9318       ReductionOps.assign(1, ReductionOpsType());
9319   }
9320 
9321   /// Add all reduction operations for the reduction instruction \p I.
9322   void addReductionOps(Instruction *I) {
9323     if (isCmpSelMinMax(I)) {
9324       ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition());
9325       ReductionOps[1].emplace_back(I);
9326     } else {
9327       ReductionOps[0].emplace_back(I);
9328     }
9329   }
9330 
9331   static Value *getLHS(RecurKind Kind, Instruction *I) {
9332     if (Kind == RecurKind::None)
9333       return nullptr;
9334     return I->getOperand(getFirstOperandIndex(I));
9335   }
9336   static Value *getRHS(RecurKind Kind, Instruction *I) {
9337     if (Kind == RecurKind::None)
9338       return nullptr;
9339     return I->getOperand(getFirstOperandIndex(I) + 1);
9340   }
9341 
9342 public:
9343   HorizontalReduction() = default;
9344 
9345   /// Try to find a reduction tree.
9346   bool matchAssociativeReduction(PHINode *Phi, Instruction *Inst) {
9347     assert((!Phi || is_contained(Phi->operands(), Inst)) &&
9348            "Phi needs to use the binary operator");
9349     assert((isa<BinaryOperator>(Inst) || isa<SelectInst>(Inst) ||
9350             isa<IntrinsicInst>(Inst)) &&
9351            "Expected binop, select, or intrinsic for reduction matching");
9352     RdxKind = getRdxKind(Inst);
9353 
9354     // We could have an initial reduction that is not an add.
9355     //  r *= v1 + v2 + v3 + v4
9356     // In such a case start looking for a tree rooted in the first '+'.
9357     if (Phi) {
9358       if (getLHS(RdxKind, Inst) == Phi) {
9359         Phi = nullptr;
9360         Inst = dyn_cast<Instruction>(getRHS(RdxKind, Inst));
9361         if (!Inst)
9362           return false;
9363         RdxKind = getRdxKind(Inst);
9364       } else if (getRHS(RdxKind, Inst) == Phi) {
9365         Phi = nullptr;
9366         Inst = dyn_cast<Instruction>(getLHS(RdxKind, Inst));
9367         if (!Inst)
9368           return false;
9369         RdxKind = getRdxKind(Inst);
9370       }
9371     }
9372 
9373     if (!isVectorizable(RdxKind, Inst))
9374       return false;
9375 
9376     // Analyze "regular" integer/FP types for reductions - no target-specific
9377     // types or pointers.
9378     Type *Ty = Inst->getType();
9379     if (!isValidElementType(Ty) || Ty->isPointerTy())
9380       return false;
9381 
9382     // Though the ultimate reduction may have multiple uses, its condition must
9383     // have only a single use.
9384     if (auto *Sel = dyn_cast<SelectInst>(Inst))
9385       if (!Sel->getCondition()->hasOneUse())
9386         return false;
9387 
9388     ReductionRoot = Inst;
9389 
9390     // The opcode for leaf values that we perform a reduction on.
9391     // For example: load(x) + load(y) + load(z) + fptoui(w)
9392     // The leaf opcode for 'w' does not match, so we don't include it as a
9393     // potential candidate for the reduction.
9394     unsigned LeafOpcode = 0;
9395 
9396     // Post-order traverse the reduction tree starting at Inst. We only handle
9397     // true trees containing binary operators or selects.
9398     SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
9399     Stack.push_back(std::make_pair(Inst, getFirstOperandIndex(Inst)));
9400     initReductionOps(Inst);
9401     while (!Stack.empty()) {
9402       Instruction *TreeN = Stack.back().first;
9403       unsigned EdgeToVisit = Stack.back().second++;
9404       const RecurKind TreeRdxKind = getRdxKind(TreeN);
9405       bool IsReducedValue = TreeRdxKind != RdxKind;
9406 
9407       // Postorder visit.
9408       if (IsReducedValue || EdgeToVisit >= getNumberOfOperands(TreeN)) {
9409         if (IsReducedValue)
9410           ReducedVals.push_back(TreeN);
9411         else {
9412           auto ExtraArgsIter = ExtraArgs.find(TreeN);
9413           if (ExtraArgsIter != ExtraArgs.end() && !ExtraArgsIter->second) {
9414             // Check if TreeN is an extra argument of its parent operation.
9415             if (Stack.size() <= 1) {
9416               // TreeN can't be an extra argument as it is a root reduction
9417               // operation.
9418               return false;
9419             }
9420             // Yes, TreeN is an extra argument, do not add it to a list of
9421             // reduction operations.
9422             // Stack[Stack.size() - 2] always points to the parent operation.
9423             markExtraArg(Stack[Stack.size() - 2], TreeN);
9424             ExtraArgs.erase(TreeN);
9425           } else
9426             addReductionOps(TreeN);
9427         }
9428         // Retract.
9429         Stack.pop_back();
9430         continue;
9431       }
9432 
9433       // Visit operands.
9434       Value *EdgeVal = getRdxOperand(TreeN, EdgeToVisit);
9435       auto *EdgeInst = dyn_cast<Instruction>(EdgeVal);
9436       if (!EdgeInst) {
9437         // Edge value is not a reduction instruction or a leaf instruction.
9438         // (It may be a constant, function argument, or something else.)
9439         markExtraArg(Stack.back(), EdgeVal);
9440         continue;
9441       }
9442       RecurKind EdgeRdxKind = getRdxKind(EdgeInst);
9443       // Continue the analysis if the next operand is a reduction operation or
9444       // (possibly) a leaf value. If the leaf value opcode is not set yet, the
9445       // first non-reduction operation encountered is taken as the leaf opcode.
9446       // Only handle trees in the current basic block.
9447       // Each tree node needs to have the minimal number of uses, except for
9448       // the ultimate reduction.
9450       const bool IsRdxInst = EdgeRdxKind == RdxKind;
9451       if (EdgeInst != Phi && EdgeInst != Inst &&
9452           hasSameParent(EdgeInst, Inst->getParent()) &&
9453           hasRequiredNumberOfUses(isCmpSelMinMax(Inst), EdgeInst) &&
9454           (!LeafOpcode || LeafOpcode == EdgeInst->getOpcode() || IsRdxInst)) {
9455         if (IsRdxInst) {
9456           // We need to be able to reassociate the reduction operations.
9457           if (!isVectorizable(EdgeRdxKind, EdgeInst)) {
9458             // I is an extra argument for TreeN (its parent operation).
9459             markExtraArg(Stack.back(), EdgeInst);
9460             continue;
9461           }
9462         } else if (!LeafOpcode) {
9463           LeafOpcode = EdgeInst->getOpcode();
9464         }
9465         Stack.push_back(
9466             std::make_pair(EdgeInst, getFirstOperandIndex(EdgeInst)));
9467         continue;
9468       }
9469       // I is an extra argument for TreeN (its parent operation).
9470       markExtraArg(Stack.back(), EdgeInst);
9471     }
9472     return true;
9473   }
9474 
9475   /// Attempt to vectorize the tree found by matchAssociativeReduction.
9476   Value *tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
9477     // If there are a sufficient number of reduction values, reduce
9478     // to a nearby power-of-2. We can safely generate oversized
9479     // vectors and rely on the backend to split them to legal sizes.
9480     unsigned NumReducedVals = ReducedVals.size();
9481     if (NumReducedVals < 4)
9482       return nullptr;
9483 
9484     // Intersect the fast-math-flags from all reduction operations.
9485     FastMathFlags RdxFMF;
9486     RdxFMF.set();
9487     for (ReductionOpsType &RdxOp : ReductionOps) {
9488       for (Value *RdxVal : RdxOp) {
9489         if (auto *FPMO = dyn_cast<FPMathOperator>(RdxVal))
9490           RdxFMF &= FPMO->getFastMathFlags();
9491       }
9492     }
9493 
9494     IRBuilder<> Builder(cast<Instruction>(ReductionRoot));
9495     Builder.setFastMathFlags(RdxFMF);
9496 
9497     BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
9498     // The same extra argument may be used several times, so log each attempt
9499     // to use it.
9500     for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) {
9501       assert(Pair.first && "DebugLoc must be set.");
9502       ExternallyUsedValues[Pair.second].push_back(Pair.first);
9503     }
9504 
9505     // The compare instruction of a min/max is the insertion point for new
9506     // instructions and may be replaced with a new compare instruction.
9507     auto getCmpForMinMaxReduction = [](Instruction *RdxRootInst) {
9508       assert(isa<SelectInst>(RdxRootInst) &&
9509              "Expected min/max reduction to have select root instruction");
9510       Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition();
9511       assert(isa<Instruction>(ScalarCond) &&
9512              "Expected min/max reduction to have compare condition");
9513       return cast<Instruction>(ScalarCond);
9514     };
9515 
9516     // The reduction root is used as the insertion point for new instructions,
9517     // so set it as externally used to prevent it from being deleted.
9518     ExternallyUsedValues[ReductionRoot];
9519     SmallVector<Value *, 16> IgnoreList;
9520     for (ReductionOpsType &RdxOp : ReductionOps)
9521       IgnoreList.append(RdxOp.begin(), RdxOp.end());
9522 
9523     unsigned ReduxWidth = PowerOf2Floor(NumReducedVals);
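    // For example (illustrative): with 7 reduced values, ReduxWidth starts at
    // 4; after one 4-wide tree the width becomes PowerOf2Floor(3) == 2, which
    // fails the ReduxWidth > 2 check in the loop below, so the remaining 3
    // values are folded into the result one-by-one afterwards.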
9524     if (NumReducedVals > ReduxWidth) {
9525       // In the loop below, we are building a tree based on a window of
9526       // 'ReduxWidth' values.
9527       // If the operands of those values have common traits (compare predicate,
9528       // constant operand, etc), then we want to group those together to
9529       // minimize the cost of the reduction.
9530 
9531       // TODO: This should be extended to count common operands for
9532       //       compares and binops.
9533 
9534       // Step 1: Count the number of times each compare predicate occurs.
9535       SmallDenseMap<unsigned, unsigned> PredCountMap;
9536       for (Value *RdxVal : ReducedVals) {
9537         CmpInst::Predicate Pred;
9538         if (match(RdxVal, m_Cmp(Pred, m_Value(), m_Value())))
9539           ++PredCountMap[Pred];
9540       }
9541       // Step 2: Sort the values so the most common predicates come first.
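      // For example (illustrative): given reduced values matching
      // [icmp sgt, icmp slt, icmp sgt, icmp sgt], the three 'sgt' compares are
      // grouped first, so a vectorization window sees mostly uniform
      // predicates.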
9542       stable_sort(ReducedVals, [&PredCountMap](Value *A, Value *B) {
9543         CmpInst::Predicate PredA, PredB;
9544         if (match(A, m_Cmp(PredA, m_Value(), m_Value())) &&
9545             match(B, m_Cmp(PredB, m_Value(), m_Value()))) {
9546           return PredCountMap[PredA] > PredCountMap[PredB];
9547         }
9548         return false;
9549       });
9550     }
9551 
9552     Value *VectorizedTree = nullptr;
9553     unsigned i = 0;
9554     while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) {
9555       ArrayRef<Value *> VL(&ReducedVals[i], ReduxWidth);
9556       V.buildTree(VL, IgnoreList);
9557       if (V.isTreeTinyAndNotFullyVectorizable(/*ForReduction=*/true))
9558         break;
9559       if (V.isLoadCombineReductionCandidate(RdxKind))
9560         break;
9561       V.reorderTopToBottom();
9562       V.reorderBottomToTop(/*IgnoreReorder=*/true);
9563       V.buildExternalUses(ExternallyUsedValues);
9564 
9565       // For a poison-safe boolean logic reduction, do not replace select
9566       // instructions with logic ops. All reduced values will be frozen (see
9567       // below) to prevent leaking poison.
9568       if (isa<SelectInst>(ReductionRoot) &&
9569           isBoolLogicOp(cast<Instruction>(ReductionRoot)) &&
9570           NumReducedVals != ReduxWidth)
9571         break;
9572 
9573       V.computeMinimumValueSizes();
9574 
9575       // Estimate cost.
9576       InstructionCost TreeCost =
9577           V.getTreeCost(makeArrayRef(&ReducedVals[i], ReduxWidth));
9578       InstructionCost ReductionCost =
9579           getReductionCost(TTI, ReducedVals[i], ReduxWidth, RdxFMF);
9580       InstructionCost Cost = TreeCost + ReductionCost;
9581       if (!Cost.isValid()) {
9582         LLVM_DEBUG(dbgs() << "Encountered invalid baseline cost.\n");
9583         return nullptr;
9584       }
9585       if (Cost >= -SLPCostThreshold) {
9586         V.getORE()->emit([&]() {
9587           return OptimizationRemarkMissed(SV_NAME, "HorSLPNotBeneficial",
9588                                           cast<Instruction>(VL[0]))
9589                  << "Vectorizing horizontal reduction is possible "
9590                  << "but not beneficial with cost " << ore::NV("Cost", Cost)
9591                  << " and threshold "
9592                  << ore::NV("Threshold", -SLPCostThreshold);
9593         });
9594         break;
9595       }
9596 
9597       LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
9598                         << Cost << ". (HorRdx)\n");
9599       V.getORE()->emit([&]() {
9600         return OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction",
9601                                   cast<Instruction>(VL[0]))
9602                << "Vectorized horizontal reduction with cost "
9603                << ore::NV("Cost", Cost) << " and with tree size "
9604                << ore::NV("TreeSize", V.getTreeSize());
9605       });
9606 
9607       // Vectorize a tree.
9608       DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
9609       Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);
9610 
9611       // Emit a reduction. If the root is a select (min/max idiom), the insert
9612       // point is the compare condition of that select.
9613       Instruction *RdxRootInst = cast<Instruction>(ReductionRoot);
9614       if (isCmpSelMinMax(RdxRootInst))
9615         Builder.SetInsertPoint(getCmpForMinMaxReduction(RdxRootInst));
9616       else
9617         Builder.SetInsertPoint(RdxRootInst);
9618 
9619       // To prevent poison from leaking across what used to be sequential, safe,
9620       // scalar boolean logic operations, the reduction operand must be frozen.
9621       if (isa<SelectInst>(RdxRootInst) && isBoolLogicOp(RdxRootInst))
9622         VectorizedRoot = Builder.CreateFreeze(VectorizedRoot);
9623 
9624       Value *ReducedSubTree =
9625           emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);
9626 
9627       if (!VectorizedTree) {
9628         // Initialize the final value in the reduction.
9629         VectorizedTree = ReducedSubTree;
9630       } else {
9631         // Update the final value in the reduction.
9632         Builder.SetCurrentDebugLocation(Loc);
9633         VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
9634                                   ReducedSubTree, "op.rdx", ReductionOps);
9635       }
9636       i += ReduxWidth;
9637       ReduxWidth = PowerOf2Floor(NumReducedVals - i);
9638     }
9639 
9640     if (VectorizedTree) {
9641       // Finish the reduction.
9642       for (; i < NumReducedVals; ++i) {
9643         auto *I = cast<Instruction>(ReducedVals[i]);
9644         Builder.SetCurrentDebugLocation(I->getDebugLoc());
9645         VectorizedTree =
9646             createOp(Builder, RdxKind, VectorizedTree, I, "", ReductionOps);
9647       }
9648       for (auto &Pair : ExternallyUsedValues) {
9649         // Add each externally used value to the final reduction.
9650         for (auto *I : Pair.second) {
9651           Builder.SetCurrentDebugLocation(I->getDebugLoc());
9652           VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
9653                                     Pair.first, "op.extra", I);
9654         }
9655       }
9656 
9657       ReductionRoot->replaceAllUsesWith(VectorizedTree);
9658 
9659       // Mark all scalar reduction ops for deletion, they are replaced by the
9660       // vector reductions.
9661       V.eraseInstructions(IgnoreList);
9662     }
9663     return VectorizedTree;
9664   }
9665 
9666   unsigned numReductionValues() const { return ReducedVals.size(); }
9667 
9668 private:
9669   /// Calculate the cost of a reduction.
9670   InstructionCost getReductionCost(TargetTransformInfo *TTI,
9671                                    Value *FirstReducedVal, unsigned ReduxWidth,
9672                                    FastMathFlags FMF) {
9673     TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
9674     Type *ScalarTy = FirstReducedVal->getType();
9675     FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth);
9676     InstructionCost VectorCost, ScalarCost;
9677     switch (RdxKind) {
9678     case RecurKind::Add:
9679     case RecurKind::Mul:
9680     case RecurKind::Or:
9681     case RecurKind::And:
9682     case RecurKind::Xor:
9683     case RecurKind::FAdd:
9684     case RecurKind::FMul: {
9685       unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind);
9686       VectorCost =
9687           TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF, CostKind);
9688       ScalarCost = TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy, CostKind);
9689       break;
9690     }
9691     case RecurKind::FMax:
9692     case RecurKind::FMin: {
9693       auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy);
9694       auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy));
9695       VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy,
9696                                                /*IsUnsigned=*/false, CostKind);
9697       CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind);
9698       ScalarCost = TTI->getCmpSelInstrCost(Instruction::FCmp, ScalarTy,
9699                                            SclCondTy, RdxPred, CostKind) +
9700                    TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
9701                                            SclCondTy, RdxPred, CostKind);
9702       break;
9703     }
9704     case RecurKind::SMax:
9705     case RecurKind::SMin:
9706     case RecurKind::UMax:
9707     case RecurKind::UMin: {
9708       auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy);
9709       auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy));
9710       bool IsUnsigned =
9711           RdxKind == RecurKind::UMax || RdxKind == RecurKind::UMin;
9712       VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy, IsUnsigned,
9713                                                CostKind);
9714       CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind);
9715       ScalarCost = TTI->getCmpSelInstrCost(Instruction::ICmp, ScalarTy,
9716                                            SclCondTy, RdxPred, CostKind) +
9717                    TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
9718                                            SclCondTy, RdxPred, CostKind);
9719       break;
9720     }
9721     default:
9722       llvm_unreachable("Expected arithmetic or min/max reduction operation");
9723     }
9724 
9725     // Scalar cost is repeated for N-1 elements.
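    // (Reducing N scalars takes N-1 scalar operations, e.g. 7 fadds to reduce
    // 8 floats, while the vector reduction is costed once as a whole.)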
9726     ScalarCost *= (ReduxWidth - 1);
9727     LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost
9728                       << " for reduction that starts with " << *FirstReducedVal
9729                       << " (It is a splitting reduction)\n");
9730     return VectorCost - ScalarCost;
9731   }
9732 
9733   /// Emit a horizontal reduction of the vectorized value.
9734   Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder,
9735                        unsigned ReduxWidth, const TargetTransformInfo *TTI) {
9736     assert(VectorizedValue && "Need to have a vectorized tree node");
9737     assert(isPowerOf2_32(ReduxWidth) &&
9738            "We only handle power-of-two reductions for now");
9739     assert(RdxKind != RecurKind::FMulAdd &&
9740            "A call to the llvm.fmuladd intrinsic is not handled yet");
9741 
9742     ++NumVectorInstructions;
9743     return createSimpleTargetReduction(Builder, TTI, VectorizedValue, RdxKind);
9744   }
9745 };
9746 
9747 } // end anonymous namespace
9748 
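/// Compute the total number of scalar elements in the aggregate that
/// \p InsertInst builds into, or None if the aggregate is not homogeneous.
/// For example (illustrative): {<2 x float>, <2 x float>} yields
/// 2 (struct fields) * 2 (vector lanes) == 4.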
9749 static Optional<unsigned> getAggregateSize(Instruction *InsertInst) {
9750   if (auto *IE = dyn_cast<InsertElementInst>(InsertInst))
9751     return cast<FixedVectorType>(IE->getType())->getNumElements();
9752 
9753   unsigned AggregateSize = 1;
9754   auto *IV = cast<InsertValueInst>(InsertInst);
9755   Type *CurrentType = IV->getType();
9756   do {
9757     if (auto *ST = dyn_cast<StructType>(CurrentType)) {
9758       for (auto *Elt : ST->elements())
9759         if (Elt != ST->getElementType(0)) // check homogeneity
9760           return None;
9761       AggregateSize *= ST->getNumElements();
9762       CurrentType = ST->getElementType(0);
9763     } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
9764       AggregateSize *= AT->getNumElements();
9765       CurrentType = AT->getElementType();
9766     } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) {
9767       AggregateSize *= VT->getNumElements();
9768       return AggregateSize;
9769     } else if (CurrentType->isSingleValueType()) {
9770       return AggregateSize;
9771     } else {
9772       return None;
9773     }
9774   } while (true);
9775 }
9776 
9777 static void findBuildAggregate_rec(Instruction *LastInsertInst,
9778                                    TargetTransformInfo *TTI,
9779                                    SmallVectorImpl<Value *> &BuildVectorOpds,
9780                                    SmallVectorImpl<Value *> &InsertElts,
9781                                    unsigned OperandOffset) {
9782   do {
9783     Value *InsertedOperand = LastInsertInst->getOperand(1);
9784     Optional<unsigned> OperandIndex =
9785         getInsertIndex(LastInsertInst, OperandOffset);
9786     if (!OperandIndex)
9787       return;
9788     if (isa<InsertElementInst>(InsertedOperand) ||
9789         isa<InsertValueInst>(InsertedOperand)) {
9790       findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI,
9791                              BuildVectorOpds, InsertElts, *OperandIndex);
9792 
9793     } else {
9794       BuildVectorOpds[*OperandIndex] = InsertedOperand;
9795       InsertElts[*OperandIndex] = LastInsertInst;
9796     }
9797     LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0));
9798   } while (LastInsertInst != nullptr &&
9799            (isa<InsertValueInst>(LastInsertInst) ||
9800             isa<InsertElementInst>(LastInsertInst)) &&
9801            LastInsertInst->hasOneUse());
9802 }
9803 
9804 /// Recognize construction of vectors like
9805 ///  %ra = insertelement <4 x float> poison, float %s0, i32 0
9806 ///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
9807 ///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
9808 ///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
9809 ///  starting from the last insertelement or insertvalue instruction.
9810 ///
9811 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>},
9812 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on.
9813 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples.
9814 ///
9815 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type.
9816 ///
9817 /// \return true if it matches.
9818 static bool findBuildAggregate(Instruction *LastInsertInst,
9819                                TargetTransformInfo *TTI,
9820                                SmallVectorImpl<Value *> &BuildVectorOpds,
9821                                SmallVectorImpl<Value *> &InsertElts) {
9822 
9823   assert((isa<InsertElementInst>(LastInsertInst) ||
9824           isa<InsertValueInst>(LastInsertInst)) &&
9825          "Expected insertelement or insertvalue instruction!");
9826 
9827   assert((BuildVectorOpds.empty() && InsertElts.empty()) &&
9828          "Expected empty result vectors!");
9829 
9830   Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst);
9831   if (!AggregateSize)
9832     return false;
9833   BuildVectorOpds.resize(*AggregateSize);
9834   InsertElts.resize(*AggregateSize);
9835 
9836   findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 0);
9837   llvm::erase_value(BuildVectorOpds, nullptr);
9838   llvm::erase_value(InsertElts, nullptr);
9839   return BuildVectorOpds.size() >= 2;
9843 }
9844 
9845 /// Try and get a reduction value from a phi node.
9846 ///
9847 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions
9848 /// if they come from either \p ParentBB or a containing loop latch.
9849 ///
9850 /// \returns A candidate reduction value if possible, or \code nullptr \endcode
9851 /// if not possible.
9852 static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
9853                                 BasicBlock *ParentBB, LoopInfo *LI) {
9854   // There are situations where the reduction value is not dominated by the
9855   // reduction phi. Vectorizing such cases has been reported to cause
9856   // miscompiles. See PR25787.
9857   auto DominatedReduxValue = [&](Value *R) {
9858     return isa<Instruction>(R) &&
9859            DT->dominates(P->getParent(), cast<Instruction>(R)->getParent());
9860   };
9861 
9862   Value *Rdx = nullptr;
9863 
9864   // Return the incoming value if it comes from the same BB as the phi node.
9865   if (P->getIncomingBlock(0) == ParentBB) {
9866     Rdx = P->getIncomingValue(0);
9867   } else if (P->getIncomingBlock(1) == ParentBB) {
9868     Rdx = P->getIncomingValue(1);
9869   }
9870 
9871   if (Rdx && DominatedReduxValue(Rdx))
9872     return Rdx;
9873 
9874   // Otherwise, check whether we have a loop latch to look at.
9875   Loop *BBL = LI->getLoopFor(ParentBB);
9876   if (!BBL)
9877     return nullptr;
9878   BasicBlock *BBLatch = BBL->getLoopLatch();
9879   if (!BBLatch)
9880     return nullptr;
9881 
9882   // There is a loop latch, return the incoming value if it comes from
9883   // that. This reduction pattern occasionally turns up.
9884   if (P->getIncomingBlock(0) == BBLatch) {
9885     Rdx = P->getIncomingValue(0);
9886   } else if (P->getIncomingBlock(1) == BBLatch) {
9887     Rdx = P->getIncomingValue(1);
9888   }
9889 
9890   if (Rdx && DominatedReduxValue(Rdx))
9891     return Rdx;
9892 
9893   return nullptr;
9894 }
9895 
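/// Match a candidate reduction root: either a binary operator or one of the
/// min/max intrinsics, capturing its two operands in \p V0 and \p V1.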
9896 static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) {
9897   if (match(I, m_BinOp(m_Value(V0), m_Value(V1))))
9898     return true;
9899   if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1))))
9900     return true;
9901   if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1))))
9902     return true;
9903   if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1))))
9904     return true;
9905   if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1))))
9906     return true;
9907   if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1))))
9908     return true;
9909   if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1))))
9910     return true;
9911   return false;
9912 }
9913 
9914 /// Attempt to reduce a horizontal reduction.
9915 /// If it is legal to match a horizontal reduction feeding the phi node \a P
9916 /// with reduction operators \a Root (or one of its operands) in a basic block
9917 /// \a BB, then check if it can be done. If horizontal reduction is not found
9918 /// and root instruction is a binary operation, vectorization of the operands is
9919 /// attempted.
9920 /// \returns true if a horizontal reduction was matched and reduced or operands
9921 /// of one of the binary instruction were vectorized.
9922 /// \returns false if a horizontal reduction was not matched (or not possible)
9923 /// or no vectorization of any binary operation feeding \a Root instruction was
9924 /// performed.
9925 static bool tryToVectorizeHorReductionOrInstOperands(
9926     PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
9927     TargetTransformInfo *TTI,
9928     const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
9929   if (!ShouldVectorizeHor)
9930     return false;
9931 
9932   if (!Root)
9933     return false;
9934 
9935   if (Root->getParent() != BB || isa<PHINode>(Root))
9936     return false;
9937   // Start analysis starting from Root instruction. If horizontal reduction is
9938   // found, try to vectorize it. If it is not a horizontal reduction or
9939   // vectorization is not possible or not effective, and currently analyzed
9940   // instruction is a binary operation, try to vectorize the operands, using
9941   // pre-order DFS traversal order. If the operands were not vectorized, repeat
9942   // the same procedure considering each operand as a possible root of the
9943   // horizontal reduction.
9944   // Interrupt the process if the Root instruction itself was vectorized or all
9945   // sub-trees no higher than RecursionMaxDepth were analyzed/vectorized.
9946   // Skip the analysis of CmpInsts: the compiler performs a separate
9947   // post-analysis of CmpInsts, so we can skip the extra attempts in
9948   // tryToVectorizeHorReductionOrInstOperands and save compile time.
9949   std::queue<std::pair<Instruction *, unsigned>> Stack;
9950   Stack.emplace(Root, 0);
9951   SmallPtrSet<Value *, 8> VisitedInstrs;
9952   SmallVector<WeakTrackingVH> PostponedInsts;
9953   bool Res = false;
9954   auto &&TryToReduce = [TTI, &P, &R](Instruction *Inst, Value *&B0,
9955                                      Value *&B1) -> Value * {
9956     bool IsBinop = matchRdxBop(Inst, B0, B1);
9957     bool IsSelect = match(Inst, m_Select(m_Value(), m_Value(), m_Value()));
9958     if (IsBinop || IsSelect) {
9959       HorizontalReduction HorRdx;
9960       if (HorRdx.matchAssociativeReduction(P, Inst))
9961         return HorRdx.tryToReduce(R, TTI);
9962     }
9963     return nullptr;
9964   };
9965   while (!Stack.empty()) {
9966     Instruction *Inst;
9967     unsigned Level;
9968     std::tie(Inst, Level) = Stack.front();
9969     Stack.pop();
9970     // Do not try to analyze instruction that has already been vectorized.
9971     // This may happen when we vectorize instruction operands on a previous
9972     // iteration while stack was populated before that happened.
9973     if (R.isDeleted(Inst))
9974       continue;
9975     Value *B0 = nullptr, *B1 = nullptr;
9976     if (Value *V = TryToReduce(Inst, B0, B1)) {
9977       Res = true;
9978       // Set P to nullptr to avoid re-analysis of phi node in
9979       // matchAssociativeReduction function unless this is the root node.
9980       P = nullptr;
9981       if (auto *I = dyn_cast<Instruction>(V)) {
9982         // Try to find another reduction.
9983         Stack.emplace(I, Level);
9984         continue;
9985       }
9986     } else {
9987       bool IsBinop = B0 && B1;
9988       if (P && IsBinop) {
9989         Inst = dyn_cast<Instruction>(B0);
9990         if (Inst == P)
9991           Inst = dyn_cast<Instruction>(B1);
9992         if (!Inst) {
9993           // Set P to nullptr to avoid re-analysis of phi node in
9994           // matchAssociativeReduction function unless this is the root node.
9995           P = nullptr;
9996           continue;
9997         }
9998       }
9999       // Set P to nullptr to avoid re-analysis of phi node in
10000       // matchAssociativeReduction function unless this is the root node.
10001       P = nullptr;
10002       // Do not try to vectorize CmpInst operands, this is done separately.
10003       // Final attempt for binop args vectorization should happen after the loop
10004       // to try to find reductions.
10005       if (!isa<CmpInst>(Inst))
10006         PostponedInsts.push_back(Inst);
10007     }
10008 
10009     // Try to vectorize operands.
10010     // Continue analysis for the instruction from the same basic block only to
10011     // save compile time.
10012     if (++Level < RecursionMaxDepth)
10013       for (auto *Op : Inst->operand_values())
10014         if (VisitedInstrs.insert(Op).second)
10015           if (auto *I = dyn_cast<Instruction>(Op))
10016             // Do not try to vectorize CmpInst operands; this is done
10017             // separately.
10018             if (!isa<PHINode>(I) && !isa<CmpInst>(I) && !R.isDeleted(I) &&
10019                 I->getParent() == BB)
10020               Stack.emplace(I, Level);
10021   }
10022   // Try to vectorize binops for which no reductions were found.
10023   for (Value *V : PostponedInsts)
10024     if (auto *Inst = dyn_cast<Instruction>(V))
10025       if (!R.isDeleted(Inst))
10026         Res |= Vectorize(Inst, R);
10027   return Res;
10028 }
10029 
10030 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
10031                                                  BasicBlock *BB, BoUpSLP &R,
10032                                                  TargetTransformInfo *TTI) {
10033   auto *I = dyn_cast_or_null<Instruction>(V);
10034   if (!I)
10035     return false;
10036 
10037   if (!isa<BinaryOperator>(I))
10038     P = nullptr;
10039   // Try to match and vectorize a horizontal reduction.
10040   auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool {
10041     return tryToVectorize(I, R);
10042   };
10043   return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI,
10044                                                   ExtraVectorization);
10045 }
10046 
10047 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
10048                                                  BasicBlock *BB, BoUpSLP &R) {
10049   const DataLayout &DL = BB->getModule()->getDataLayout();
10050   if (!R.canMapToVector(IVI->getType(), DL))
10051     return false;
10052 
10053   SmallVector<Value *, 16> BuildVectorOpds;
10054   SmallVector<Value *, 16> BuildVectorInsts;
10055   if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts))
10056     return false;
10057 
10058   LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
10059   // An aggregate value is unlikely to be processed in a vector register.
10060   return tryToVectorizeList(BuildVectorOpds, R);
10061 }
10062 
10063 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
10064                                                    BasicBlock *BB, BoUpSLP &R) {
10065   SmallVector<Value *, 16> BuildVectorInsts;
10066   SmallVector<Value *, 16> BuildVectorOpds;
10067   SmallVector<int> Mask;
10068   if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) ||
10069       (llvm::all_of(
10070            BuildVectorOpds,
10071            [](Value *V) { return isa<ExtractElementInst, UndefValue>(V); }) &&
10072        isFixedVectorShuffle(BuildVectorOpds, Mask)))
10073     return false;
10074 
10075   LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
10076   return tryToVectorizeList(BuildVectorInsts, R);
10077 }
10078 
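/// Sort \p Incoming with \p Comparator, split it into maximal runs of
/// mutually compatible (per \p AreCompatible) elements, and invoke
/// \p TryToVectorizeHelper on each run. For example (illustrative): values of
/// types [i32, float, i32, i32] sort into [i32, i32, i32, float], so the
/// three i32 values form one vectorization candidate bundle.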
10079 template <typename T>
10080 static bool
10081 tryToVectorizeSequence(SmallVectorImpl<T *> &Incoming,
10082                        function_ref<unsigned(T *)> Limit,
10083                        function_ref<bool(T *, T *)> Comparator,
10084                        function_ref<bool(T *, T *)> AreCompatible,
10085                        function_ref<bool(ArrayRef<T *>, bool)> TryToVectorizeHelper,
10086                        bool LimitForRegisterSize) {
10087   bool Changed = false;
10088   // Sort by type, parent, operands.
10089   stable_sort(Incoming, Comparator);
10090 
10091   // Try to vectorize elements based on their type.
10092   SmallVector<T *> Candidates;
10093   for (auto *IncIt = Incoming.begin(), *E = Incoming.end(); IncIt != E;) {
10094     // Look for the next elements with the same type, parent and operand
10095     // kinds.
10096     auto *SameTypeIt = IncIt;
10097     while (SameTypeIt != E && AreCompatible(*SameTypeIt, *IncIt))
10098       ++SameTypeIt;
10099 
10100     // Try to vectorize them.
10101     unsigned NumElts = (SameTypeIt - IncIt);
10102     LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at nodes ("
10103                       << NumElts << ")\n");
10104     // The vectorization is a 3-stage attempt:
10105     // 1. Try to vectorize instructions with the same/alternate opcodes at the
10106     // size of the maximal register first.
10107     // 2. Try to vectorize the remaining instructions with the same type, if
10108     // possible. This may give better results than vectorizing only the
10109     // instructions with the same/alternate opcodes.
10110     // 3. Make a final attempt to vectorize all instructions with the
10111     // same/alternate ops only; this may yield some extra final
10112     // vectorization.
10113     if (NumElts > 1 &&
10114         TryToVectorizeHelper(makeArrayRef(IncIt, NumElts), LimitForRegisterSize)) {
10115       // Success: start over, because instructions might have been changed.
10116       Changed = true;
10117     } else if (NumElts < Limit(*IncIt) &&
10118                (Candidates.empty() ||
10119                 Candidates.front()->getType() == (*IncIt)->getType())) {
10120       Candidates.append(IncIt, std::next(IncIt, NumElts));
10121     }
10122     // Final attempt to vectorize instructions with the same types.
10123     if (Candidates.size() > 1 &&
10124         (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType())) {
10125       if (TryToVectorizeHelper(Candidates, /*LimitForRegisterSize=*/false)) {
10126         // Success: start over, because instructions might have been changed.
10127         Changed = true;
10128       } else if (LimitForRegisterSize) {
10129         // Try to vectorize using small vectors.
10130         for (auto *It = Candidates.begin(), *End = Candidates.end();
10131              It != End;) {
10132           auto *SameTypeIt = It;
10133           while (SameTypeIt != End && AreCompatible(*SameTypeIt, *It))
10134             ++SameTypeIt;
10135           unsigned NumElts = (SameTypeIt - It);
10136           if (NumElts > 1 && TryToVectorizeHelper(makeArrayRef(It, NumElts),
10137                                                   /*LimitForRegisterSize=*/false))
10138             Changed = true;
10139           It = SameTypeIt;
10140         }
10141       }
10142       Candidates.clear();
10143     }
10144 
10145     // Start over at the next instruction of a different type (or the end).
10146     IncIt = SameTypeIt;
10147   }
10148   return Changed;
10149 }
10150 
10151 /// Compare two cmp instructions. If IsCompatibility is true, the function
10152 /// returns true if the two cmps have same/swapped predicates and the most
10153 /// compatible corresponding operands. If IsCompatibility is false, the
10154 /// function implements a strict weak ordering between two cmp instructions,
10155 /// returning true if the first instruction is "less" than the second, i.e. its
10156 /// predicate is less than the second's, or the operand IDs are less than the
10157 /// operand IDs of the second cmp instruction.
template <bool IsCompatibility>
static bool compareCmp(Value *V, Value *V2,
                       function_ref<bool(Instruction *)> IsDeleted) {
  auto *CI1 = cast<CmpInst>(V);
  auto *CI2 = cast<CmpInst>(V2);
  if (IsDeleted(CI2) || !isValidElementType(CI2->getType()))
    return false;
  if (CI1->getOperand(0)->getType()->getTypeID() <
      CI2->getOperand(0)->getType()->getTypeID())
    return !IsCompatibility;
  if (CI1->getOperand(0)->getType()->getTypeID() >
      CI2->getOperand(0)->getType()->getTypeID())
    return false;
  CmpInst::Predicate Pred1 = CI1->getPredicate();
  CmpInst::Predicate Pred2 = CI2->getPredicate();
  CmpInst::Predicate SwapPred1 = CmpInst::getSwappedPredicate(Pred1);
  CmpInst::Predicate SwapPred2 = CmpInst::getSwappedPredicate(Pred2);
  CmpInst::Predicate BasePred1 = std::min(Pred1, SwapPred1);
  CmpInst::Predicate BasePred2 = std::min(Pred2, SwapPred2);
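  // E.g., ICMP_SGT and ICMP_SLT are swapped forms of each other and thus
  // share the same base predicate.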
  if (BasePred1 < BasePred2)
    return !IsCompatibility;
  if (BasePred1 > BasePred2)
    return false;
  // Compare operands.
  bool LEPreds = Pred1 <= Pred2;
  bool GEPreds = Pred1 >= Pred2;
  for (int I = 0, E = CI1->getNumOperands(); I < E; ++I) {
    auto *Op1 = CI1->getOperand(LEPreds ? I : E - I - 1);
    auto *Op2 = CI2->getOperand(GEPreds ? I : E - I - 1);
    if (Op1->getValueID() < Op2->getValueID())
      return !IsCompatibility;
    if (Op1->getValueID() > Op2->getValueID())
      return false;
    if (auto *I1 = dyn_cast<Instruction>(Op1))
      if (auto *I2 = dyn_cast<Instruction>(Op2)) {
        if (I1->getParent() != I2->getParent())
          return false;
        InstructionsState S = getSameOpcode({I1, I2});
        if (S.getOpcode())
          continue;
        return false;
      }
  }
  return IsCompatibility;
}

bool SLPVectorizerPass::vectorizeSimpleInstructions(
    SmallVectorImpl<Instruction *> &Instructions, BasicBlock *BB, BoUpSLP &R,
    bool AtTerminator) {
  bool OpsChanged = false;
  SmallVector<Instruction *, 4> PostponedCmps;
  for (auto *I : reverse(Instructions)) {
    if (R.isDeleted(I))
      continue;
    if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I))
      OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
    else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I))
      OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
    else if (isa<CmpInst>(I))
      PostponedCmps.push_back(I);
  }
  if (AtTerminator) {
    // Try to find reductions first.
    for (Instruction *I : PostponedCmps) {
      if (R.isDeleted(I))
        continue;
      for (Value *Op : I->operands())
        OpsChanged |= vectorizeRootInstruction(nullptr, Op, BB, R, TTI);
    }
    // Try to vectorize operands as vector bundles.
    for (Instruction *I : PostponedCmps) {
      if (R.isDeleted(I))
        continue;
      OpsChanged |= tryToVectorize(I, R);
    }
    // Try to vectorize the list of compares.
    // Sort by type, compare predicate, etc.
    auto &&CompareSorter = [&R](Value *V, Value *V2) {
      return compareCmp<false>(V, V2,
                               [&R](Instruction *I) { return R.isDeleted(I); });
    };

    auto &&AreCompatibleCompares = [&R](Value *V1, Value *V2) {
      if (V1 == V2)
        return true;
      return compareCmp<true>(V1, V2,
                              [&R](Instruction *I) { return R.isDeleted(I); });
    };
    auto Limit = [&R](Value *V) {
      unsigned EltSize = R.getVectorElementSize(V);
      return std::max(2U, R.getMaxVecRegSize() / EltSize);
    };
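    // E.g., with a 128-bit max vector register and 32-bit elements, Limit
    // returns 4 candidate lanes (and never fewer than 2).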

    SmallVector<Value *> Vals(PostponedCmps.begin(), PostponedCmps.end());
    OpsChanged |= tryToVectorizeSequence<Value>(
        Vals, Limit, CompareSorter, AreCompatibleCompares,
        [this, &R](ArrayRef<Value *> Candidates, bool LimitForRegisterSize) {
          // Exclude possible reductions from other blocks.
          bool ArePossiblyReducedInOtherBlock =
              any_of(Candidates, [](Value *V) {
                return any_of(V->users(), [V](User *U) {
                  return isa<SelectInst>(U) &&
                         cast<SelectInst>(U)->getParent() !=
                             cast<Instruction>(V)->getParent();
                });
              });
          if (ArePossiblyReducedInOtherBlock)
            return false;
          return tryToVectorizeList(Candidates, R, LimitForRegisterSize);
        },
        /*LimitForRegisterSize=*/true);
    Instructions.clear();
  } else {
    // Insert in reverse order since the PostponedCmps vector was filled in
    // reverse order.
    Instructions.assign(PostponedCmps.rbegin(), PostponedCmps.rend());
  }
  return OpsChanged;
}

bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallPtrSet<Value *, 16> VisitedInstrs;
  // Maps phi nodes to the non-phi nodes found in the use tree for each phi
  // node. This helps to identify the chains that can be vectorized most
  // effectively.
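  // Illustrative example: for
  //   %p = phi i32 [ %add, %bb1 ], [ %q, %bb2 ]
  //   %q = phi i32 [ %mul, %bb3 ], [ %sub, %bb4 ]
  // the entry for %p collects %add, %mul and %sub, looking through the
  // nested phi %q.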
  DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes;
  auto PHICompare = [this, &PHIToOpcodes](Value *V1, Value *V2) {
    assert(isValidElementType(V1->getType()) &&
           isValidElementType(V2->getType()) &&
           "Expected vectorizable types only.");
    // It is fine to compare type IDs here, since we expect only vectorizable
    // types, such as ints, floats and pointers; we don't care about other
    // types.
    if (V1->getType()->getTypeID() < V2->getType()->getTypeID())
      return true;
    if (V1->getType()->getTypeID() > V2->getType()->getTypeID())
      return false;
    ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
    ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
    if (Opcodes1.size() < Opcodes2.size())
      return true;
    if (Opcodes1.size() > Opcodes2.size())
      return false;
    Optional<bool> ConstOrder;
    for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
      // Undefs are compatible with any other value.
      if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) {
        if (!ConstOrder)
          ConstOrder =
              !isa<UndefValue>(Opcodes1[I]) && isa<UndefValue>(Opcodes2[I]);
        continue;
      }
      if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
        if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
          DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent());
          DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent());
          if (!NodeI1)
            return NodeI2 != nullptr;
          if (!NodeI2)
            return false;
          assert((NodeI1 == NodeI2) ==
                     (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
                 "Different nodes should have different DFS numbers");
          if (NodeI1 != NodeI2)
            return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
          InstructionsState S = getSameOpcode({I1, I2});
          if (S.getOpcode())
            continue;
          return I1->getOpcode() < I2->getOpcode();
        }
      if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) {
        if (!ConstOrder)
          ConstOrder = Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID();
        continue;
      }
      if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID())
        return true;
      if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID())
        return false;
    }
    return ConstOrder && *ConstOrder;
  };
  auto AreCompatiblePHIs = [&PHIToOpcodes](Value *V1, Value *V2) {
    if (V1 == V2)
      return true;
    if (V1->getType() != V2->getType())
      return false;
    ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
    ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
    if (Opcodes1.size() != Opcodes2.size())
      return false;
    for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
      // Undefs are compatible with any other value.
      if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I]))
        continue;
      if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
        if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
          if (I1->getParent() != I2->getParent())
            return false;
          InstructionsState S = getSameOpcode({I1, I2});
          if (S.getOpcode())
            continue;
          return false;
        }
      if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I]))
        continue;
      if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID())
        return false;
    }
    return true;
  };
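  // For example, two i32 phis whose non-phi incoming values are adds in the
  // same basic block are compatible; phis of different types, or phis whose
  // incoming instructions live in different blocks, are not.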
  auto Limit = [&R](Value *V) {
    unsigned EltSize = R.getVectorElementSize(V);
    return std::max(2U, R.getMaxVecRegSize() / EltSize);
  };

  bool HaveVectorizedPhiNodes = false;
  do {
    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (Instruction &I : *BB) {
      PHINode *P = dyn_cast<PHINode>(&I);
      if (!P)
        break;

      // No need to analyze deleted, vectorized and non-vectorizable
      // instructions.
      if (!VisitedInstrs.count(P) && !R.isDeleted(P) &&
          isValidElementType(P->getType()))
        Incoming.push_back(P);
    }

    // Find the corresponding non-phi nodes for better matching when trying to
    // build the tree.
    for (Value *V : Incoming) {
      SmallVectorImpl<Value *> &Opcodes =
          PHIToOpcodes.try_emplace(V).first->getSecond();
      if (!Opcodes.empty())
        continue;
      SmallVector<Value *, 4> Nodes(1, V);
      SmallPtrSet<Value *, 4> Visited;
      while (!Nodes.empty()) {
        auto *PHI = cast<PHINode>(Nodes.pop_back_val());
        if (!Visited.insert(PHI).second)
          continue;
        for (Value *V : PHI->incoming_values()) {
          if (auto *PHI1 = dyn_cast<PHINode>(V)) {
            Nodes.push_back(PHI1);
            continue;
          }
          Opcodes.emplace_back(V);
        }
      }
    }

    HaveVectorizedPhiNodes = tryToVectorizeSequence<Value>(
        Incoming, Limit, PHICompare, AreCompatiblePHIs,
        [this, &R](ArrayRef<Value *> Candidates, bool LimitForRegisterSize) {
          return tryToVectorizeList(Candidates, R, LimitForRegisterSize);
        },
        /*LimitForRegisterSize=*/true);
    Changed |= HaveVectorizedPhiNodes;
    VisitedInstrs.insert(Incoming.begin(), Incoming.end());
  } while (HaveVectorizedPhiNodes);

  VisitedInstrs.clear();

  SmallVector<Instruction *, 8> PostProcessInstructions;
  SmallDenseSet<Instruction *, 4> KeyNodes;
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // Skip instructions with a scalable type. The number of elements is
    // unknown at compile time for scalable types.
    if (isa<ScalableVectorType>(it->getType()))
      continue;

    // Skip instructions marked for deletion.
    if (R.isDeleted(&*it))
      continue;
    // We may go through BB multiple times, so skip the ones we have already
    // checked.
    if (!VisitedInstrs.insert(&*it).second) {
      if (it->use_empty() && KeyNodes.contains(&*it) &&
          vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
                                      it->isTerminator())) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
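      // A typical candidate is a loop-carried accumulator such as
      //   %sum = phi i32 [ 0, %entry ], [ %sum.next, %loop ]
      // with exactly two incoming values.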
      if (P->getNumIncomingValues() == 2) {
        // Try to match and vectorize a horizontal reduction.
        if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
                                     TTI)) {
          Changed = true;
          it = BB->begin();
          e = BB->end();
          continue;
        }
      }
      // Try to vectorize the incoming values of the PHI, to catch reductions
      // that feed into PHIs.
      for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) {
        // Skip if the incoming block is the current BB for now. Also, bypass
        // unreachable IR for efficiency and to avoid crashing.
        // TODO: Collect the skipped incoming values and try to vectorize them
        // after processing BB.
        if (BB == P->getIncomingBlock(I) ||
            !DT->isReachableFromEntry(P->getIncomingBlock(I)))
          continue;

        Changed |= vectorizeRootInstruction(nullptr, P->getIncomingValue(I),
                                            P->getIncomingBlock(I), R, TTI);
      }
      continue;
    }

    // Ran into an instruction without users, such as a terminator, a store,
    // or a function call with an ignored return value. Treat such unused
    // instructions as vectorization roots, based on the instruction type:
    // void instructions, plus CallInst and InvokeInst even when they produce
    // a value.
    if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
                            isa<InvokeInst>(it))) {
      KeyNodes.insert(&*it);
      bool OpsChanged = false;
      if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
        for (auto *V : it->operand_values()) {
          // Try to match and vectorize a horizontal reduction.
          OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
        }
      }
      // Start vectorization of the post-process list of instructions from
      // the top-tree instructions, trying to vectorize as many instructions
      // as possible.
      OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
                                                it->isTerminator());
      if (OpsChanged) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
    }

    if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
        isa<InsertValueInst>(it))
      PostProcessInstructions.push_back(&*it);
  }

  return Changed;
}

bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
  auto Changed = false;
  for (auto &Entry : GEPs) {
    // If the getelementptr list has fewer than two elements, there's nothing
    // to do.
    if (Entry.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
                      << Entry.second.size() << ".\n");

    // Process the GEP list in chunks suitable for the target's supported
    // vector size. If a vector register can't hold 1 element, we are done. We
    // are trying to vectorize the index computations, so the maximum number of
    // elements is based on the size of the index expression, rather than the
    // size of the GEP itself (the target's pointer size).
    unsigned MaxVecRegSize = R.getMaxVecRegSize();
    unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin());
    if (MaxVecRegSize < EltSize)
      continue;

    unsigned MaxElts = MaxVecRegSize / EltSize;
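    // E.g., a 256-bit vector register and 64-bit index expressions yield
    // chunks of at most four getelementptrs.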
    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
      auto Len = std::min<unsigned>(BE - BI, MaxElts);
      ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);

      // Initialize a set of candidate getelementptrs. Note that we use a
      // SetVector here to preserve program order. If the index computations
      // are vectorizable and begin with loads, we want to minimize the chance
      // of having to reorder them later.
      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

      // Some of the candidates may have already been vectorized after we
      // initially collected them. If so, they are marked as deleted, so remove
      // them from the set of candidates.
      Candidates.remove_if(
          [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); });

      // Remove from the set of candidates all pairs of getelementptrs with
      // constant differences. Such getelementptrs are likely not good
      // candidates for vectorization in a bottom-up phase since one can be
      // computed from the other. We also ensure all candidate getelementptr
      // indices are unique.
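      // Illustrative example: the SCEVs of "gep %p, %i" and "gep %p, %i + 1"
      // differ by a constant, so one index is computable from the other and
      // the pair is dropped.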
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
        auto *GEPI = GEPList[I];
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = GEPList[J];
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPI);
            Candidates.remove(GEPJ);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPJ);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;

      // Add the single, non-constant index of each candidate to the bundle. We
      // ensured the indices met these constraints when we originally collected
      // the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try to vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Sort by type, base pointer and value operand. Value operands must be
  // compatible (have the same opcode and the same parent), otherwise it is
  // definitely not profitable to try to vectorize them.
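  // For example, two stores whose value operands are loads defined in the
  // same basic block can sort next to each other and form one group, while
  // a store of a constant and a store of an instruction end up in separate
  // groups.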
  auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) {
    if (V->getPointerOperandType()->getTypeID() <
        V2->getPointerOperandType()->getTypeID())
      return true;
    if (V->getPointerOperandType()->getTypeID() >
        V2->getPointerOperandType()->getTypeID())
      return false;
    // UndefValues are compatible with all other values.
    if (isa<UndefValue>(V->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return false;
    if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        DomTreeNodeBase<llvm::BasicBlock> *NodeI1 =
            DT->getNode(I1->getParent());
        DomTreeNodeBase<llvm::BasicBlock> *NodeI2 =
            DT->getNode(I2->getParent());
        assert(NodeI1 && "Should only process reachable instructions");
        assert(NodeI2 && "Should only process reachable instructions");
        assert((NodeI1 == NodeI2) ==
                   (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
               "Different nodes should have different DFS numbers");
        if (NodeI1 != NodeI2)
          return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
        InstructionsState S = getSameOpcode({I1, I2});
        if (S.getOpcode())
          return false;
        return I1->getOpcode() < I2->getOpcode();
      }
    if (isa<Constant>(V->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return false;
    return V->getValueOperand()->getValueID() <
           V2->getValueOperand()->getValueID();
  };

  auto &&AreCompatibleStores = [](StoreInst *V1, StoreInst *V2) {
    if (V1 == V2)
      return true;
    if (V1->getPointerOperandType() != V2->getPointerOperandType())
      return false;
    // Undefs are compatible with any other value.
    if (isa<UndefValue>(V1->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return true;
    if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        if (I1->getParent() != I2->getParent())
          return false;
        InstructionsState S = getSameOpcode({I1, I2});
        return S.getOpcode() > 0;
      }
    if (isa<Constant>(V1->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return true;
    return V1->getValueOperand()->getValueID() ==
           V2->getValueOperand()->getValueID();
  };
  auto Limit = [&R, this](StoreInst *SI) {
    unsigned EltSize = DL->getTypeSizeInBits(SI->getValueOperand()->getType());
    return R.getMinVF(EltSize);
  };
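  // E.g., for i32 stores this asks the tree for the smallest vectorization
  // factor worth trying for that element size (typically at least two lanes).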

  // Attempt to sort and vectorize each of the store-groups.
  for (auto &Pair : Stores) {
    if (Pair.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                      << Pair.second.size() << ".\n");

    if (!isValidElementType(Pair.second.front()->getValueOperand()->getType()))
      continue;

    Changed |= tryToVectorizeSequence<StoreInst>(
        Pair.second, Limit, StoreSorter, AreCompatibleStores,
        [this, &R](ArrayRef<StoreInst *> Candidates, bool) {
          return vectorizeStores(Candidates, R);
        },
        /*LimitForRegisterSize=*/false);
  }
  return Changed;
}

char SLPVectorizer::ID = 0;

static const char lv_name[] = "SLP Vectorizer";

INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); }