1 //===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass implements the Bottom Up SLP vectorizer. It detects consecutive
10 // stores that can be put together into vector-stores. Next, it attempts to
11 // construct vectorizable tree using the use-def chains. If a profitable tree
12 // was found, the SLP vectorizer performs vectorization on the tree.
13 //
14 // The pass is inspired by the work described in the paper:
15 //  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
16 //
17 //===----------------------------------------------------------------------===//
18 
19 #include "llvm/Transforms/Vectorize/SLPVectorizer.h"
20 #include "llvm/ADT/DenseMap.h"
21 #include "llvm/ADT/DenseSet.h"
22 #include "llvm/ADT/Optional.h"
23 #include "llvm/ADT/PostOrderIterator.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/ADT/SetVector.h"
26 #include "llvm/ADT/SmallBitVector.h"
27 #include "llvm/ADT/SmallPtrSet.h"
28 #include "llvm/ADT/SmallSet.h"
29 #include "llvm/ADT/SmallString.h"
30 #include "llvm/ADT/Statistic.h"
31 #include "llvm/ADT/iterator.h"
32 #include "llvm/ADT/iterator_range.h"
33 #include "llvm/Analysis/AliasAnalysis.h"
34 #include "llvm/Analysis/AssumptionCache.h"
35 #include "llvm/Analysis/CodeMetrics.h"
36 #include "llvm/Analysis/DemandedBits.h"
37 #include "llvm/Analysis/GlobalsModRef.h"
38 #include "llvm/Analysis/IVDescriptors.h"
39 #include "llvm/Analysis/LoopAccessAnalysis.h"
40 #include "llvm/Analysis/LoopInfo.h"
41 #include "llvm/Analysis/MemoryLocation.h"
42 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
43 #include "llvm/Analysis/ScalarEvolution.h"
44 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
45 #include "llvm/Analysis/TargetLibraryInfo.h"
46 #include "llvm/Analysis/TargetTransformInfo.h"
47 #include "llvm/Analysis/ValueTracking.h"
48 #include "llvm/Analysis/VectorUtils.h"
49 #include "llvm/IR/Attributes.h"
50 #include "llvm/IR/BasicBlock.h"
51 #include "llvm/IR/Constant.h"
52 #include "llvm/IR/Constants.h"
53 #include "llvm/IR/DataLayout.h"
54 #include "llvm/IR/DebugLoc.h"
55 #include "llvm/IR/DerivedTypes.h"
56 #include "llvm/IR/Dominators.h"
57 #include "llvm/IR/Function.h"
58 #include "llvm/IR/IRBuilder.h"
59 #include "llvm/IR/InstrTypes.h"
60 #include "llvm/IR/Instruction.h"
61 #include "llvm/IR/Instructions.h"
62 #include "llvm/IR/IntrinsicInst.h"
63 #include "llvm/IR/Intrinsics.h"
64 #include "llvm/IR/Module.h"
65 #include "llvm/IR/NoFolder.h"
66 #include "llvm/IR/Operator.h"
67 #include "llvm/IR/PatternMatch.h"
68 #include "llvm/IR/Type.h"
69 #include "llvm/IR/Use.h"
70 #include "llvm/IR/User.h"
71 #include "llvm/IR/Value.h"
72 #include "llvm/IR/ValueHandle.h"
73 #include "llvm/IR/Verifier.h"
74 #include "llvm/InitializePasses.h"
75 #include "llvm/Pass.h"
76 #include "llvm/Support/Casting.h"
77 #include "llvm/Support/CommandLine.h"
78 #include "llvm/Support/Compiler.h"
79 #include "llvm/Support/DOTGraphTraits.h"
80 #include "llvm/Support/Debug.h"
81 #include "llvm/Support/ErrorHandling.h"
82 #include "llvm/Support/GraphWriter.h"
83 #include "llvm/Support/InstructionCost.h"
84 #include "llvm/Support/KnownBits.h"
85 #include "llvm/Support/MathExtras.h"
86 #include "llvm/Support/raw_ostream.h"
87 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
88 #include "llvm/Transforms/Utils/LoopUtils.h"
89 #include "llvm/Transforms/Vectorize.h"
90 #include <algorithm>
91 #include <cassert>
92 #include <cstdint>
93 #include <iterator>
94 #include <memory>
95 #include <set>
96 #include <string>
97 #include <tuple>
98 #include <utility>
99 #include <vector>
100 
101 using namespace llvm;
102 using namespace llvm::PatternMatch;
103 using namespace slpvectorizer;
104 
105 #define SV_NAME "slp-vectorizer"
106 #define DEBUG_TYPE "SLP"
107 
108 STATISTIC(NumVectorInstructions, "Number of vector instructions generated");
109 
110 cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
111                                   cl::desc("Run the SLP vectorization passes"));
112 
113 static cl::opt<int>
114     SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
115                      cl::desc("Only vectorize if you gain more than this "
116                               "number "));
117 
118 static cl::opt<bool>
119 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
120                    cl::desc("Attempt to vectorize horizontal reductions"));
121 
122 static cl::opt<bool> ShouldStartVectorizeHorAtStore(
123     "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
124     cl::desc(
125         "Attempt to vectorize horizontal reductions feeding into a store"));
126 
127 static cl::opt<int>
128 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
129     cl::desc("Attempt to vectorize for this register size in bits"));
130 
131 static cl::opt<unsigned>
132 MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden,
133     cl::desc("Maximum SLP vectorization factor (0=unlimited)"));
134 
135 static cl::opt<int>
136 MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden,
137     cl::desc("Maximum depth of the lookup for consecutive stores."));
138 
139 /// Limits the size of scheduling regions in a block.
140 /// It avoid long compile times for _very_ large blocks where vector
141 /// instructions are spread over a wide range.
142 /// This limit is way higher than needed by real-world functions.
143 static cl::opt<int>
144 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
145     cl::desc("Limit the size of the SLP scheduling region per block"));
146 
147 static cl::opt<int> MinVectorRegSizeOption(
148     "slp-min-reg-size", cl::init(128), cl::Hidden,
149     cl::desc("Attempt to vectorize for this register size in bits"));
150 
151 static cl::opt<unsigned> RecursionMaxDepth(
152     "slp-recursion-max-depth", cl::init(12), cl::Hidden,
153     cl::desc("Limit the recursion depth when building a vectorizable tree"));
154 
155 static cl::opt<unsigned> MinTreeSize(
156     "slp-min-tree-size", cl::init(3), cl::Hidden,
157     cl::desc("Only vectorize small trees if they are fully vectorizable"));
158 
159 // The maximum depth that the look-ahead score heuristic will explore.
160 // The higher this value, the higher the compilation time overhead.
161 static cl::opt<int> LookAheadMaxDepth(
162     "slp-max-look-ahead-depth", cl::init(2), cl::Hidden,
163     cl::desc("The maximum look-ahead depth for operand reordering scores"));
164 
165 // The Look-ahead heuristic goes through the users of the bundle to calculate
166 // the users cost in getExternalUsesCost(). To avoid compilation time increase
167 // we limit the number of users visited to this value.
168 static cl::opt<unsigned> LookAheadUsersBudget(
169     "slp-look-ahead-users-budget", cl::init(2), cl::Hidden,
170     cl::desc("The maximum number of users to visit while visiting the "
171              "predecessors. This prevents compilation time increase."));
172 
173 static cl::opt<bool>
174     ViewSLPTree("view-slp-tree", cl::Hidden,
175                 cl::desc("Display the SLP trees with Graphviz"));
176 
177 // Limit the number of alias checks. The limit is chosen so that
178 // it has no negative effect on the llvm benchmarks.
179 static const unsigned AliasedCheckLimit = 10;
180 
181 // Another limit for the alias checks: The maximum distance between load/store
182 // instructions where alias checks are done.
183 // This limit is useful for very large basic blocks.
184 static const unsigned MaxMemDepDistance = 160;
185 
186 /// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
187 /// regions to be handled.
188 static const int MinScheduleRegionSize = 16;
189 
190 /// Predicate for the element types that the SLP vectorizer supports.
191 ///
192 /// The most important thing to filter here are types which are invalid in LLVM
193 /// vectors. We also filter target specific types which have absolutely no
194 /// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
195 /// avoids spending time checking the cost model and realizing that they will
196 /// be inevitably scalarized.
197 static bool isValidElementType(Type *Ty) {
198   return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
199          !Ty->isPPC_FP128Ty();
200 }
201 
202 /// \returns true if all of the instructions in \p VL are in the same block or
203 /// false otherwise.
204 static bool allSameBlock(ArrayRef<Value *> VL) {
205   Instruction *I0 = dyn_cast<Instruction>(VL[0]);
206   if (!I0)
207     return false;
208   BasicBlock *BB = I0->getParent();
209   for (int I = 1, E = VL.size(); I < E; I++) {
210     auto *II = dyn_cast<Instruction>(VL[I]);
211     if (!II)
212       return false;
213 
214     if (BB != II->getParent())
215       return false;
216   }
217   return true;
218 }
219 
220 /// \returns True if all of the values in \p VL are constants (but not
221 /// globals/constant expressions).
222 static bool allConstant(ArrayRef<Value *> VL) {
223   // Constant expressions and globals can't be vectorized like normal integer/FP
224   // constants.
225   for (Value *i : VL)
226     if (!isa<Constant>(i) || isa<ConstantExpr>(i) || isa<GlobalValue>(i))
227       return false;
228   return true;
229 }
230 
231 /// \returns True if all of the values in \p VL are identical.
232 static bool isSplat(ArrayRef<Value *> VL) {
233   for (unsigned i = 1, e = VL.size(); i < e; ++i)
234     if (VL[i] != VL[0])
235       return false;
236   return true;
237 }
238 
239 /// \returns True if \p I is commutative, handles CmpInst and BinaryOperator.
240 static bool isCommutative(Instruction *I) {
241   if (auto *Cmp = dyn_cast<CmpInst>(I))
242     return Cmp->isCommutative();
243   if (auto *BO = dyn_cast<BinaryOperator>(I))
244     return BO->isCommutative();
245   // TODO: This should check for generic Instruction::isCommutative(), but
246   //       we need to confirm that the caller code correctly handles Intrinsics
247   //       for example (does not have 2 operands).
248   return false;
249 }
250 
251 /// Checks if the vector of instructions can be represented as a shuffle, like:
252 /// %x0 = extractelement <4 x i8> %x, i32 0
253 /// %x3 = extractelement <4 x i8> %x, i32 3
254 /// %y1 = extractelement <4 x i8> %y, i32 1
255 /// %y2 = extractelement <4 x i8> %y, i32 2
256 /// %x0x0 = mul i8 %x0, %x0
257 /// %x3x3 = mul i8 %x3, %x3
258 /// %y1y1 = mul i8 %y1, %y1
259 /// %y2y2 = mul i8 %y2, %y2
260 /// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
261 /// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
262 /// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
263 /// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
264 /// ret <4 x i8> %ins4
265 /// can be transformed into:
266 /// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
267 ///                                                         i32 6>
268 /// %2 = mul <4 x i8> %1, %1
269 /// ret <4 x i8> %2
270 /// We convert this initially to something like:
271 /// %x0 = extractelement <4 x i8> %x, i32 0
272 /// %x3 = extractelement <4 x i8> %x, i32 3
273 /// %y1 = extractelement <4 x i8> %y, i32 1
274 /// %y2 = extractelement <4 x i8> %y, i32 2
275 /// %1 = insertelement <4 x i8> poison, i8 %x0, i32 0
276 /// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
277 /// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
278 /// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
279 /// %5 = mul <4 x i8> %4, %4
280 /// %6 = extractelement <4 x i8> %5, i32 0
281 /// %ins1 = insertelement <4 x i8> poison, i8 %6, i32 0
282 /// %7 = extractelement <4 x i8> %5, i32 1
283 /// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
284 /// %8 = extractelement <4 x i8> %5, i32 2
285 /// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
286 /// %9 = extractelement <4 x i8> %5, i32 3
287 /// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
288 /// ret <4 x i8> %ins4
289 /// InstCombiner transforms this into a shuffle and vector mul
290 /// Mask will return the Shuffle Mask equivalent to the extracted elements.
291 /// TODO: Can we split off and reuse the shuffle mask detection from
292 /// TargetTransformInfo::getInstructionThroughput?
293 static Optional<TargetTransformInfo::ShuffleKind>
294 isShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
295   auto *EI0 = cast<ExtractElementInst>(VL[0]);
296   unsigned Size =
297       cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
298   Value *Vec1 = nullptr;
299   Value *Vec2 = nullptr;
300   enum ShuffleMode { Unknown, Select, Permute };
301   ShuffleMode CommonShuffleMode = Unknown;
302   for (unsigned I = 0, E = VL.size(); I < E; ++I) {
303     auto *EI = cast<ExtractElementInst>(VL[I]);
304     auto *Vec = EI->getVectorOperand();
305     // All vector operands must have the same number of vector elements.
306     if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
307       return None;
308     auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
309     if (!Idx)
310       return None;
311     // Undefined behavior if Idx is negative or >= Size.
312     if (Idx->getValue().uge(Size)) {
313       Mask.push_back(UndefMaskElem);
314       continue;
315     }
316     unsigned IntIdx = Idx->getValue().getZExtValue();
317     Mask.push_back(IntIdx);
318     // We can extractelement from undef or poison vector.
319     if (isa<UndefValue>(Vec))
320       continue;
321     // For correct shuffling we have to have at most 2 different vector operands
322     // in all extractelement instructions.
323     if (!Vec1 || Vec1 == Vec)
324       Vec1 = Vec;
325     else if (!Vec2 || Vec2 == Vec)
326       Vec2 = Vec;
327     else
328       return None;
329     if (CommonShuffleMode == Permute)
330       continue;
331     // If the extract index is not the same as the operation number, it is a
332     // permutation.
333     if (IntIdx != I) {
334       CommonShuffleMode = Permute;
335       continue;
336     }
337     CommonShuffleMode = Select;
338   }
339   // If we're not crossing lanes in different vectors, consider it as blending.
340   if (CommonShuffleMode == Select && Vec2)
341     return TargetTransformInfo::SK_Select;
342   // If Vec2 was never used, we have a permutation of a single vector, otherwise
343   // we have permutation of 2 vectors.
344   return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
345               : TargetTransformInfo::SK_PermuteSingleSrc;
346 }
347 
348 namespace {
349 
350 /// Main data required for vectorization of instructions.
351 struct InstructionsState {
352   /// The very first instruction in the list with the main opcode.
353   Value *OpValue = nullptr;
354 
355   /// The main/alternate instruction.
356   Instruction *MainOp = nullptr;
357   Instruction *AltOp = nullptr;
358 
359   /// The main/alternate opcodes for the list of instructions.
360   unsigned getOpcode() const {
361     return MainOp ? MainOp->getOpcode() : 0;
362   }
363 
364   unsigned getAltOpcode() const {
365     return AltOp ? AltOp->getOpcode() : 0;
366   }
367 
368   /// Some of the instructions in the list have alternate opcodes.
369   bool isAltShuffle() const { return getOpcode() != getAltOpcode(); }
370 
371   bool isOpcodeOrAlt(Instruction *I) const {
372     unsigned CheckedOpcode = I->getOpcode();
373     return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
374   }
375 
376   InstructionsState() = delete;
377   InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
378       : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
379 };
380 
381 } // end anonymous namespace
382 
383 /// Chooses the correct key for scheduling data. If \p Op has the same (or
384 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
385 /// OpValue.
386 static Value *isOneOf(const InstructionsState &S, Value *Op) {
387   auto *I = dyn_cast<Instruction>(Op);
388   if (I && S.isOpcodeOrAlt(I))
389     return Op;
390   return S.OpValue;
391 }
392 
393 /// \returns true if \p Opcode is allowed as part of of the main/alternate
394 /// instruction for SLP vectorization.
395 ///
396 /// Example of unsupported opcode is SDIV that can potentially cause UB if the
397 /// "shuffled out" lane would result in division by zero.
398 static bool isValidForAlternation(unsigned Opcode) {
399   if (Instruction::isIntDivRem(Opcode))
400     return false;
401 
402   return true;
403 }
404 
405 /// \returns analysis of the Instructions in \p VL described in
406 /// InstructionsState, the Opcode that we suppose the whole list
407 /// could be vectorized even if its structure is diverse.
408 static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
409                                        unsigned BaseIndex = 0) {
410   // Make sure these are all Instructions.
411   if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
412     return InstructionsState(VL[BaseIndex], nullptr, nullptr);
413 
414   bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
415   bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
416   unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
417   unsigned AltOpcode = Opcode;
418   unsigned AltIndex = BaseIndex;
419 
420   // Check for one alternate opcode from another BinaryOperator.
421   // TODO - generalize to support all operators (types, calls etc.).
422   for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
423     unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
424     if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
425       if (InstOpcode == Opcode || InstOpcode == AltOpcode)
426         continue;
427       if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
428           isValidForAlternation(Opcode)) {
429         AltOpcode = InstOpcode;
430         AltIndex = Cnt;
431         continue;
432       }
433     } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
434       Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
435       Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
436       if (Ty0 == Ty1) {
437         if (InstOpcode == Opcode || InstOpcode == AltOpcode)
438           continue;
439         if (Opcode == AltOpcode) {
440           assert(isValidForAlternation(Opcode) &&
441                  isValidForAlternation(InstOpcode) &&
442                  "Cast isn't safe for alternation, logic needs to be updated!");
443           AltOpcode = InstOpcode;
444           AltIndex = Cnt;
445           continue;
446         }
447       }
448     } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
449       continue;
450     return InstructionsState(VL[BaseIndex], nullptr, nullptr);
451   }
452 
453   return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
454                            cast<Instruction>(VL[AltIndex]));
455 }
456 
457 /// \returns true if all of the values in \p VL have the same type or false
458 /// otherwise.
459 static bool allSameType(ArrayRef<Value *> VL) {
460   Type *Ty = VL[0]->getType();
461   for (int i = 1, e = VL.size(); i < e; i++)
462     if (VL[i]->getType() != Ty)
463       return false;
464 
465   return true;
466 }
467 
468 /// \returns True if Extract{Value,Element} instruction extracts element Idx.
469 static Optional<unsigned> getExtractIndex(Instruction *E) {
470   unsigned Opcode = E->getOpcode();
471   assert((Opcode == Instruction::ExtractElement ||
472           Opcode == Instruction::ExtractValue) &&
473          "Expected extractelement or extractvalue instruction.");
474   if (Opcode == Instruction::ExtractElement) {
475     auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
476     if (!CI)
477       return None;
478     return CI->getZExtValue();
479   }
480   ExtractValueInst *EI = cast<ExtractValueInst>(E);
481   if (EI->getNumIndices() != 1)
482     return None;
483   return *EI->idx_begin();
484 }
485 
486 /// \returns True if in-tree use also needs extract. This refers to
487 /// possible scalar operand in vectorized instruction.
488 static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
489                                     TargetLibraryInfo *TLI) {
490   unsigned Opcode = UserInst->getOpcode();
491   switch (Opcode) {
492   case Instruction::Load: {
493     LoadInst *LI = cast<LoadInst>(UserInst);
494     return (LI->getPointerOperand() == Scalar);
495   }
496   case Instruction::Store: {
497     StoreInst *SI = cast<StoreInst>(UserInst);
498     return (SI->getPointerOperand() == Scalar);
499   }
500   case Instruction::Call: {
501     CallInst *CI = cast<CallInst>(UserInst);
502     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
503     for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
504       if (hasVectorInstrinsicScalarOpd(ID, i))
505         return (CI->getArgOperand(i) == Scalar);
506     }
507     LLVM_FALLTHROUGH;
508   }
509   default:
510     return false;
511   }
512 }
513 
514 /// \returns the AA location that is being access by the instruction.
515 static MemoryLocation getLocation(Instruction *I, AAResults *AA) {
516   if (StoreInst *SI = dyn_cast<StoreInst>(I))
517     return MemoryLocation::get(SI);
518   if (LoadInst *LI = dyn_cast<LoadInst>(I))
519     return MemoryLocation::get(LI);
520   return MemoryLocation();
521 }
522 
523 /// \returns True if the instruction is not a volatile or atomic load/store.
524 static bool isSimple(Instruction *I) {
525   if (LoadInst *LI = dyn_cast<LoadInst>(I))
526     return LI->isSimple();
527   if (StoreInst *SI = dyn_cast<StoreInst>(I))
528     return SI->isSimple();
529   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
530     return !MI->isVolatile();
531   return true;
532 }
533 
534 namespace llvm {
535 
536 static void inversePermutation(ArrayRef<unsigned> Indices,
537                                SmallVectorImpl<int> &Mask) {
538   Mask.clear();
539   const unsigned E = Indices.size();
540   Mask.resize(E, E + 1);
541   for (unsigned I = 0; I < E; ++I)
542     Mask[Indices[I]] = I;
543 }
544 
545 namespace slpvectorizer {
546 
547 /// Bottom Up SLP Vectorizer.
548 class BoUpSLP {
549   struct TreeEntry;
550   struct ScheduleData;
551 
552 public:
553   using ValueList = SmallVector<Value *, 8>;
554   using InstrList = SmallVector<Instruction *, 16>;
555   using ValueSet = SmallPtrSet<Value *, 16>;
556   using StoreList = SmallVector<StoreInst *, 8>;
557   using ExtraValueToDebugLocsMap =
558       MapVector<Value *, SmallVector<Instruction *, 2>>;
559   using OrdersType = SmallVector<unsigned, 4>;
560 
561   BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
562           TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
563           DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
564           const DataLayout *DL, OptimizationRemarkEmitter *ORE)
565       : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC),
566         DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
567     CodeMetrics::collectEphemeralValues(F, AC, EphValues);
568     // Use the vector register size specified by the target unless overridden
569     // by a command-line option.
570     // TODO: It would be better to limit the vectorization factor based on
571     //       data type rather than just register size. For example, x86 AVX has
572     //       256-bit registers, but it does not support integer operations
573     //       at that width (that requires AVX2).
574     if (MaxVectorRegSizeOption.getNumOccurrences())
575       MaxVecRegSize = MaxVectorRegSizeOption;
576     else
577       MaxVecRegSize =
578           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
579               .getFixedSize();
580 
581     if (MinVectorRegSizeOption.getNumOccurrences())
582       MinVecRegSize = MinVectorRegSizeOption;
583     else
584       MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
585   }
586 
587   /// Vectorize the tree that starts with the elements in \p VL.
588   /// Returns the vectorized root.
589   Value *vectorizeTree();
590 
591   /// Vectorize the tree but with the list of externally used values \p
592   /// ExternallyUsedValues. Values in this MapVector can be replaced but the
593   /// generated extractvalue instructions.
594   Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);
595 
596   /// \returns the cost incurred by unwanted spills and fills, caused by
597   /// holding live values over call sites.
598   InstructionCost getSpillCost() const;
599 
600   /// \returns the vectorization cost of the subtree that starts at \p VL.
601   /// A negative number means that this is profitable.
602   InstructionCost getTreeCost();
603 
604   /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
605   /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
606   void buildTree(ArrayRef<Value *> Roots,
607                  ArrayRef<Value *> UserIgnoreLst = None);
608 
609   /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
610   /// the purpose of scheduling and extraction in the \p UserIgnoreLst taking
611   /// into account (and updating it, if required) list of externally used
612   /// values stored in \p ExternallyUsedValues.
613   void buildTree(ArrayRef<Value *> Roots,
614                  ExtraValueToDebugLocsMap &ExternallyUsedValues,
615                  ArrayRef<Value *> UserIgnoreLst = None);
616 
617   /// Clear the internal data structures that are created by 'buildTree'.
618   void deleteTree() {
619     VectorizableTree.clear();
620     ScalarToTreeEntry.clear();
621     MustGather.clear();
622     ExternalUses.clear();
623     NumOpsWantToKeepOrder.clear();
624     NumOpsWantToKeepOriginalOrder = 0;
625     for (auto &Iter : BlocksSchedules) {
626       BlockScheduling *BS = Iter.second.get();
627       BS->clear();
628     }
629     MinBWs.clear();
630     InstrElementSize.clear();
631   }
632 
633   unsigned getTreeSize() const { return VectorizableTree.size(); }
634 
635   /// Perform LICM and CSE on the newly generated gather sequences.
636   void optimizeGatherSequence();
637 
638   /// \returns The best order of instructions for vectorization.
639   Optional<ArrayRef<unsigned>> bestOrder() const {
640     assert(llvm::all_of(
641                NumOpsWantToKeepOrder,
642                [this](const decltype(NumOpsWantToKeepOrder)::value_type &D) {
643                  return D.getFirst().size() ==
644                         VectorizableTree[0]->Scalars.size();
645                }) &&
646            "All orders must have the same size as number of instructions in "
647            "tree node.");
648     auto I = std::max_element(
649         NumOpsWantToKeepOrder.begin(), NumOpsWantToKeepOrder.end(),
650         [](const decltype(NumOpsWantToKeepOrder)::value_type &D1,
651            const decltype(NumOpsWantToKeepOrder)::value_type &D2) {
652           return D1.second < D2.second;
653         });
654     if (I == NumOpsWantToKeepOrder.end() ||
655         I->getSecond() <= NumOpsWantToKeepOriginalOrder)
656       return None;
657 
658     return makeArrayRef(I->getFirst());
659   }
660 
661   /// Builds the correct order for root instructions.
662   /// If some leaves have the same instructions to be vectorized, we may
663   /// incorrectly evaluate the best order for the root node (it is built for the
664   /// vector of instructions without repeated instructions and, thus, has less
665   /// elements than the root node). This function builds the correct order for
666   /// the root node.
667   /// For example, if the root node is \<a+b, a+c, a+d, f+e\>, then the leaves
668   /// are \<a, a, a, f\> and \<b, c, d, e\>. When we try to vectorize the first
669   /// leaf, it will be shrink to \<a, b\>. If instructions in this leaf should
670   /// be reordered, the best order will be \<1, 0\>. We need to extend this
671   /// order for the root node. For the root node this order should look like
672   /// \<3, 0, 1, 2\>. This function extends the order for the reused
673   /// instructions.
674   void findRootOrder(OrdersType &Order) {
675     // If the leaf has the same number of instructions to vectorize as the root
676     // - order must be set already.
677     unsigned RootSize = VectorizableTree[0]->Scalars.size();
678     if (Order.size() == RootSize)
679       return;
680     SmallVector<unsigned, 4> RealOrder(Order.size());
681     std::swap(Order, RealOrder);
682     SmallVector<int, 4> Mask;
683     inversePermutation(RealOrder, Mask);
684     Order.assign(Mask.begin(), Mask.end());
685     // The leaf has less number of instructions - need to find the true order of
686     // the root.
687     // Scan the nodes starting from the leaf back to the root.
688     const TreeEntry *PNode = VectorizableTree.back().get();
689     SmallVector<const TreeEntry *, 4> Nodes(1, PNode);
690     SmallPtrSet<const TreeEntry *, 4> Visited;
691     while (!Nodes.empty() && Order.size() != RootSize) {
692       const TreeEntry *PNode = Nodes.pop_back_val();
693       if (!Visited.insert(PNode).second)
694         continue;
695       const TreeEntry &Node = *PNode;
696       for (const EdgeInfo &EI : Node.UserTreeIndices)
697         if (EI.UserTE)
698           Nodes.push_back(EI.UserTE);
699       if (Node.ReuseShuffleIndices.empty())
700         continue;
701       // Build the order for the parent node.
702       OrdersType NewOrder(Node.ReuseShuffleIndices.size(), RootSize);
703       SmallVector<unsigned, 4> OrderCounter(Order.size(), 0);
704       // The algorithm of the order extension is:
705       // 1. Calculate the number of the same instructions for the order.
706       // 2. Calculate the index of the new order: total number of instructions
707       // with order less than the order of the current instruction + reuse
708       // number of the current instruction.
709       // 3. The new order is just the index of the instruction in the original
710       // vector of the instructions.
711       for (unsigned I : Node.ReuseShuffleIndices)
712         ++OrderCounter[Order[I]];
713       SmallVector<unsigned, 4> CurrentCounter(Order.size(), 0);
714       for (unsigned I = 0, E = Node.ReuseShuffleIndices.size(); I < E; ++I) {
715         unsigned ReusedIdx = Node.ReuseShuffleIndices[I];
716         unsigned OrderIdx = Order[ReusedIdx];
717         unsigned NewIdx = 0;
718         for (unsigned J = 0; J < OrderIdx; ++J)
719           NewIdx += OrderCounter[J];
720         NewIdx += CurrentCounter[OrderIdx];
721         ++CurrentCounter[OrderIdx];
722         assert(NewOrder[NewIdx] == RootSize &&
723                "The order index should not be written already.");
724         NewOrder[NewIdx] = I;
725       }
726       std::swap(Order, NewOrder);
727     }
728     assert(Order.size() == RootSize &&
729            "Root node is expected or the size of the order must be the same as "
730            "the number of elements in the root node.");
731     assert(llvm::all_of(Order,
732                         [RootSize](unsigned Val) { return Val != RootSize; }) &&
733            "All indices must be initialized");
734   }
735 
736   /// \return The vector element size in bits to use when vectorizing the
737   /// expression tree ending at \p V. If V is a store, the size is the width of
738   /// the stored value. Otherwise, the size is the width of the largest loaded
739   /// value reaching V. This method is used by the vectorizer to calculate
740   /// vectorization factors.
741   unsigned getVectorElementSize(Value *V);
742 
743   /// Compute the minimum type sizes required to represent the entries in a
744   /// vectorizable tree.
745   void computeMinimumValueSizes();
746 
747   // \returns maximum vector register size as set by TTI or overridden by cl::opt.
748   unsigned getMaxVecRegSize() const {
749     return MaxVecRegSize;
750   }
751 
752   // \returns minimum vector register size as set by cl::opt.
753   unsigned getMinVecRegSize() const {
754     return MinVecRegSize;
755   }
756 
757   unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
758     unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
759       MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
760     return MaxVF ? MaxVF : UINT_MAX;
761   }
762 
763   /// Check if homogeneous aggregate is isomorphic to some VectorType.
764   /// Accepts homogeneous multidimensional aggregate of scalars/vectors like
765   /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
766   /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
767   ///
768   /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
769   unsigned canMapToVector(Type *T, const DataLayout &DL) const;
770 
771   /// \returns True if the VectorizableTree is both tiny and not fully
772   /// vectorizable. We do not vectorize such trees.
773   bool isTreeTinyAndNotFullyVectorizable() const;
774 
775   /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
776   /// can be load combined in the backend. Load combining may not be allowed in
777   /// the IR optimizer, so we do not want to alter the pattern. For example,
778   /// partially transforming a scalar bswap() pattern into vector code is
779   /// effectively impossible for the backend to undo.
780   /// TODO: If load combining is allowed in the IR optimizer, this analysis
781   ///       may not be necessary.
782   bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;
783 
784   /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
785   /// can be load combined in the backend. Load combining may not be allowed in
786   /// the IR optimizer, so we do not want to alter the pattern. For example,
787   /// partially transforming a scalar bswap() pattern into vector code is
788   /// effectively impossible for the backend to undo.
789   /// TODO: If load combining is allowed in the IR optimizer, this analysis
790   ///       may not be necessary.
791   bool isLoadCombineCandidate() const;
792 
793   OptimizationRemarkEmitter *getORE() { return ORE; }
794 
795   /// This structure holds any data we need about the edges being traversed
796   /// during buildTree_rec(). We keep track of:
797   /// (i) the user TreeEntry index, and
798   /// (ii) the index of the edge.
799   struct EdgeInfo {
800     EdgeInfo() = default;
801     EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
802         : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
803     /// The user TreeEntry.
804     TreeEntry *UserTE = nullptr;
805     /// The operand index of the use.
806     unsigned EdgeIdx = UINT_MAX;
807 #ifndef NDEBUG
808     friend inline raw_ostream &operator<<(raw_ostream &OS,
809                                           const BoUpSLP::EdgeInfo &EI) {
810       EI.dump(OS);
811       return OS;
812     }
813     /// Debug print.
814     void dump(raw_ostream &OS) const {
815       OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
816          << " EdgeIdx:" << EdgeIdx << "}";
817     }
818     LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
819 #endif
820   };
821 
822   /// A helper data structure to hold the operands of a vector of instructions.
823   /// This supports a fixed vector length for all operand vectors.
824   class VLOperands {
825     /// For each operand we need (i) the value, and (ii) the opcode that it
826     /// would be attached to if the expression was in a left-linearized form.
827     /// This is required to avoid illegal operand reordering.
828     /// For example:
829     /// \verbatim
830     ///                         0 Op1
831     ///                         |/
832     /// Op1 Op2   Linearized    + Op2
833     ///   \ /     ---------->   |/
834     ///    -                    -
835     ///
836     /// Op1 - Op2            (0 + Op1) - Op2
837     /// \endverbatim
838     ///
839     /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
840     ///
841     /// Another way to think of this is to track all the operations across the
842     /// path from the operand all the way to the root of the tree and to
843     /// calculate the operation that corresponds to this path. For example, the
844     /// path from Op2 to the root crosses the RHS of the '-', therefore the
845     /// corresponding operation is a '-' (which matches the one in the
846     /// linearized tree, as shown above).
847     ///
848     /// For lack of a better term, we refer to this operation as Accumulated
849     /// Path Operation (APO).
850     struct OperandData {
851       OperandData() = default;
852       OperandData(Value *V, bool APO, bool IsUsed)
853           : V(V), APO(APO), IsUsed(IsUsed) {}
854       /// The operand value.
855       Value *V = nullptr;
856       /// TreeEntries only allow a single opcode, or an alternate sequence of
857       /// them (e.g, +, -). Therefore, we can safely use a boolean value for the
858       /// APO. It is set to 'true' if 'V' is attached to an inverse operation
859       /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise
860       /// (e.g., Add/Mul)
861       bool APO = false;
862       /// Helper data for the reordering function.
863       bool IsUsed = false;
864     };
865 
866     /// During operand reordering, we are trying to select the operand at lane
867     /// that matches best with the operand at the neighboring lane. Our
868     /// selection is based on the type of value we are looking for. For example,
869     /// if the neighboring lane has a load, we need to look for a load that is
870     /// accessing a consecutive address. These strategies are summarized in the
871     /// 'ReorderingMode' enumerator.
872     enum class ReorderingMode {
873       Load,     ///< Matching loads to consecutive memory addresses
874       Opcode,   ///< Matching instructions based on opcode (same or alternate)
875       Constant, ///< Matching constants
876       Splat,    ///< Matching the same instruction multiple times (broadcast)
877       Failed,   ///< We failed to create a vectorizable group
878     };
879 
880     using OperandDataVec = SmallVector<OperandData, 2>;
881 
882     /// A vector of operand vectors.
883     SmallVector<OperandDataVec, 4> OpsVec;
884 
885     const DataLayout &DL;
886     ScalarEvolution &SE;
887     const BoUpSLP &R;
888 
889     /// \returns the operand data at \p OpIdx and \p Lane.
890     OperandData &getData(unsigned OpIdx, unsigned Lane) {
891       return OpsVec[OpIdx][Lane];
892     }
893 
894     /// \returns the operand data at \p OpIdx and \p Lane. Const version.
895     const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
896       return OpsVec[OpIdx][Lane];
897     }
898 
899     /// Clears the used flag for all entries.
900     void clearUsed() {
901       for (unsigned OpIdx = 0, NumOperands = getNumOperands();
902            OpIdx != NumOperands; ++OpIdx)
903         for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
904              ++Lane)
905           OpsVec[OpIdx][Lane].IsUsed = false;
906     }
907 
908     /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
909     void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
910       std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
911     }
912 
913     // The hard-coded scores listed here are not very important. When computing
914     // the scores of matching one sub-tree with another, we are basically
915     // counting the number of values that are matching. So even if all scores
916     // are set to 1, we would still get a decent matching result.
917     // However, sometimes we have to break ties. For example we may have to
918     // choose between matching loads vs matching opcodes. This is what these
919     // scores are helping us with: they provide the order of preference.
920 
921     /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
922     static const int ScoreConsecutiveLoads = 3;
923     /// ExtractElementInst from same vector and consecutive indexes.
924     static const int ScoreConsecutiveExtracts = 3;
925     /// Constants.
926     static const int ScoreConstants = 2;
927     /// Instructions with the same opcode.
928     static const int ScoreSameOpcode = 2;
929     /// Instructions with alt opcodes (e.g, add + sub).
930     static const int ScoreAltOpcodes = 1;
931     /// Identical instructions (a.k.a. splat or broadcast).
932     static const int ScoreSplat = 1;
933     /// Matching with an undef is preferable to failing.
934     static const int ScoreUndef = 1;
935     /// Score for failing to find a decent match.
936     static const int ScoreFail = 0;
937     /// User exteranl to the vectorized code.
938     static const int ExternalUseCost = 1;
939     /// The user is internal but in a different lane.
940     static const int UserInDiffLaneCost = ExternalUseCost;
941 
942     /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
943     static int getShallowScore(Value *V1, Value *V2, const DataLayout &DL,
944                                ScalarEvolution &SE) {
945       auto *LI1 = dyn_cast<LoadInst>(V1);
946       auto *LI2 = dyn_cast<LoadInst>(V2);
947       if (LI1 && LI2) {
948         if (LI1->getParent() != LI2->getParent())
949           return VLOperands::ScoreFail;
950 
951         Optional<int> Dist =
952             getPointersDiff(LI1->getPointerOperand(), LI2->getPointerOperand(),
953                             DL, SE, /*StrictCheck=*/true);
954         return (Dist && *Dist == 1) ? VLOperands::ScoreConsecutiveLoads
955                                     : VLOperands::ScoreFail;
956       }
957 
958       auto *C1 = dyn_cast<Constant>(V1);
959       auto *C2 = dyn_cast<Constant>(V2);
960       if (C1 && C2)
961         return VLOperands::ScoreConstants;
962 
963       // Extracts from consecutive indexes of the same vector better score as
964       // the extracts could be optimized away.
965       Value *EV;
966       ConstantInt *Ex1Idx, *Ex2Idx;
967       if (match(V1, m_ExtractElt(m_Value(EV), m_ConstantInt(Ex1Idx))) &&
968           match(V2, m_ExtractElt(m_Deferred(EV), m_ConstantInt(Ex2Idx))) &&
969           Ex1Idx->getZExtValue() + 1 == Ex2Idx->getZExtValue())
970         return VLOperands::ScoreConsecutiveExtracts;
971 
972       auto *I1 = dyn_cast<Instruction>(V1);
973       auto *I2 = dyn_cast<Instruction>(V2);
974       if (I1 && I2) {
975         if (I1 == I2)
976           return VLOperands::ScoreSplat;
977         InstructionsState S = getSameOpcode({I1, I2});
978         // Note: Only consider instructions with <= 2 operands to avoid
979         // complexity explosion.
980         if (S.getOpcode() && S.MainOp->getNumOperands() <= 2)
981           return S.isAltShuffle() ? VLOperands::ScoreAltOpcodes
982                                   : VLOperands::ScoreSameOpcode;
983       }
984 
985       if (isa<UndefValue>(V2))
986         return VLOperands::ScoreUndef;
987 
988       return VLOperands::ScoreFail;
989     }
990 
991     /// Holds the values and their lane that are taking part in the look-ahead
992     /// score calculation. This is used in the external uses cost calculation.
993     SmallDenseMap<Value *, int> InLookAheadValues;
994 
995     /// \Returns the additinal cost due to uses of \p LHS and \p RHS that are
996     /// either external to the vectorized code, or require shuffling.
997     int getExternalUsesCost(const std::pair<Value *, int> &LHS,
998                             const std::pair<Value *, int> &RHS) {
999       int Cost = 0;
1000       std::array<std::pair<Value *, int>, 2> Values = {{LHS, RHS}};
1001       for (int Idx = 0, IdxE = Values.size(); Idx != IdxE; ++Idx) {
1002         Value *V = Values[Idx].first;
1003         if (isa<Constant>(V)) {
1004           // Since this is a function pass, it doesn't make semantic sense to
1005           // walk the users of a subclass of Constant. The users could be in
1006           // another function, or even another module that happens to be in
1007           // the same LLVMContext.
1008           continue;
1009         }
1010 
1011         // Calculate the absolute lane, using the minimum relative lane of LHS
1012         // and RHS as base and Idx as the offset.
1013         int Ln = std::min(LHS.second, RHS.second) + Idx;
1014         assert(Ln >= 0 && "Bad lane calculation");
1015         unsigned UsersBudget = LookAheadUsersBudget;
1016         for (User *U : V->users()) {
1017           if (const TreeEntry *UserTE = R.getTreeEntry(U)) {
1018             // The user is in the VectorizableTree. Check if we need to insert.
1019             auto It = llvm::find(UserTE->Scalars, U);
1020             assert(It != UserTE->Scalars.end() && "U is in UserTE");
1021             int UserLn = std::distance(UserTE->Scalars.begin(), It);
1022             assert(UserLn >= 0 && "Bad lane");
1023             if (UserLn != Ln)
1024               Cost += UserInDiffLaneCost;
1025           } else {
1026             // Check if the user is in the look-ahead code.
1027             auto It2 = InLookAheadValues.find(U);
1028             if (It2 != InLookAheadValues.end()) {
1029               // The user is in the look-ahead code. Check the lane.
1030               if (It2->second != Ln)
1031                 Cost += UserInDiffLaneCost;
1032             } else {
1033               // The user is neither in SLP tree nor in the look-ahead code.
1034               Cost += ExternalUseCost;
1035             }
1036           }
1037           // Limit the number of visited uses to cap compilation time.
1038           if (--UsersBudget == 0)
1039             break;
1040         }
1041       }
1042       return Cost;
1043     }
1044 
1045     /// Go through the operands of \p LHS and \p RHS recursively until \p
1046     /// MaxLevel, and return the cummulative score. For example:
1047     /// \verbatim
1048     ///  A[0]  B[0]  A[1]  B[1]  C[0] D[0]  B[1] A[1]
1049     ///     \ /         \ /         \ /        \ /
1050     ///      +           +           +          +
1051     ///     G1          G2          G3         G4
1052     /// \endverbatim
1053     /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
1054     /// each level recursively, accumulating the score. It starts from matching
1055     /// the additions at level 0, then moves on to the loads (level 1). The
1056     /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
1057     /// {B[0],B[1]} match with VLOperands::ScoreConsecutiveLoads, while
1058     /// {A[0],C[0]} has a score of VLOperands::ScoreFail.
1059     /// Please note that the order of the operands does not matter, as we
1060     /// evaluate the score of all profitable combinations of operands. In
1061     /// other words the score of G1 and G4 is the same as G1 and G2. This
1062     /// heuristic is based on ideas described in:
1063     ///   Look-ahead SLP: Auto-vectorization in the presence of commutative
1064     ///   operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
1065     ///   Luís F. W. Góes
1066     int getScoreAtLevelRec(const std::pair<Value *, int> &LHS,
1067                            const std::pair<Value *, int> &RHS, int CurrLevel,
1068                            int MaxLevel) {
1069 
1070       Value *V1 = LHS.first;
1071       Value *V2 = RHS.first;
1072       // Get the shallow score of V1 and V2.
1073       int ShallowScoreAtThisLevel =
1074           std::max((int)ScoreFail, getShallowScore(V1, V2, DL, SE) -
1075                                        getExternalUsesCost(LHS, RHS));
1076       int Lane1 = LHS.second;
1077       int Lane2 = RHS.second;
1078 
1079       // If reached MaxLevel,
1080       //  or if V1 and V2 are not instructions,
1081       //  or if they are SPLAT,
1082       //  or if they are not consecutive, early return the current cost.
1083       auto *I1 = dyn_cast<Instruction>(V1);
1084       auto *I2 = dyn_cast<Instruction>(V2);
1085       if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
1086           ShallowScoreAtThisLevel == VLOperands::ScoreFail ||
1087           (isa<LoadInst>(I1) && isa<LoadInst>(I2) && ShallowScoreAtThisLevel))
1088         return ShallowScoreAtThisLevel;
1089       assert(I1 && I2 && "Should have early exited.");
1090 
1091       // Keep track of in-tree values for determining the external-use cost.
1092       InLookAheadValues[V1] = Lane1;
1093       InLookAheadValues[V2] = Lane2;
1094 
1095       // Contains the I2 operand indexes that got matched with I1 operands.
1096       SmallSet<unsigned, 4> Op2Used;
1097 
1098       // Recursion towards the operands of I1 and I2. We are trying all possbile
1099       // operand pairs, and keeping track of the best score.
1100       for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
1101            OpIdx1 != NumOperands1; ++OpIdx1) {
1102         // Try to pair op1I with the best operand of I2.
1103         int MaxTmpScore = 0;
1104         unsigned MaxOpIdx2 = 0;
1105         bool FoundBest = false;
1106         // If I2 is commutative try all combinations.
1107         unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
1108         unsigned ToIdx = isCommutative(I2)
1109                              ? I2->getNumOperands()
1110                              : std::min(I2->getNumOperands(), OpIdx1 + 1);
1111         assert(FromIdx <= ToIdx && "Bad index");
1112         for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
1113           // Skip operands already paired with OpIdx1.
1114           if (Op2Used.count(OpIdx2))
1115             continue;
1116           // Recursively calculate the cost at each level
1117           int TmpScore = getScoreAtLevelRec({I1->getOperand(OpIdx1), Lane1},
1118                                             {I2->getOperand(OpIdx2), Lane2},
1119                                             CurrLevel + 1, MaxLevel);
1120           // Look for the best score.
1121           if (TmpScore > VLOperands::ScoreFail && TmpScore > MaxTmpScore) {
1122             MaxTmpScore = TmpScore;
1123             MaxOpIdx2 = OpIdx2;
1124             FoundBest = true;
1125           }
1126         }
1127         if (FoundBest) {
1128           // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
1129           Op2Used.insert(MaxOpIdx2);
1130           ShallowScoreAtThisLevel += MaxTmpScore;
1131         }
1132       }
1133       return ShallowScoreAtThisLevel;
1134     }
1135 
1136     /// \Returns the look-ahead score, which tells us how much the sub-trees
1137     /// rooted at \p LHS and \p RHS match, the more they match the higher the
1138     /// score. This helps break ties in an informed way when we cannot decide on
1139     /// the order of the operands by just considering the immediate
1140     /// predecessors.
1141     int getLookAheadScore(const std::pair<Value *, int> &LHS,
1142                           const std::pair<Value *, int> &RHS) {
1143       InLookAheadValues.clear();
1144       return getScoreAtLevelRec(LHS, RHS, 1, LookAheadMaxDepth);
1145     }
1146 
1147     // Search all operands in Ops[*][Lane] for the one that matches best
1148     // Ops[OpIdx][LastLane] and return its opreand index.
1149     // If no good match can be found, return None.
1150     Optional<unsigned>
1151     getBestOperand(unsigned OpIdx, int Lane, int LastLane,
1152                    ArrayRef<ReorderingMode> ReorderingModes) {
1153       unsigned NumOperands = getNumOperands();
1154 
1155       // The operand of the previous lane at OpIdx.
1156       Value *OpLastLane = getData(OpIdx, LastLane).V;
1157 
1158       // Our strategy mode for OpIdx.
1159       ReorderingMode RMode = ReorderingModes[OpIdx];
1160 
1161       // The linearized opcode of the operand at OpIdx, Lane.
1162       bool OpIdxAPO = getData(OpIdx, Lane).APO;
1163 
1164       // The best operand index and its score.
1165       // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
1166       // are using the score to differentiate between the two.
1167       struct BestOpData {
1168         Optional<unsigned> Idx = None;
1169         unsigned Score = 0;
1170       } BestOp;
1171 
1172       // Iterate through all unused operands and look for the best.
1173       for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
1174         // Get the operand at Idx and Lane.
1175         OperandData &OpData = getData(Idx, Lane);
1176         Value *Op = OpData.V;
1177         bool OpAPO = OpData.APO;
1178 
1179         // Skip already selected operands.
1180         if (OpData.IsUsed)
1181           continue;
1182 
1183         // Skip if we are trying to move the operand to a position with a
1184         // different opcode in the linearized tree form. This would break the
1185         // semantics.
1186         if (OpAPO != OpIdxAPO)
1187           continue;
1188 
1189         // Look for an operand that matches the current mode.
1190         switch (RMode) {
1191         case ReorderingMode::Load:
1192         case ReorderingMode::Constant:
1193         case ReorderingMode::Opcode: {
1194           bool LeftToRight = Lane > LastLane;
1195           Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
1196           Value *OpRight = (LeftToRight) ? Op : OpLastLane;
1197           unsigned Score =
1198               getLookAheadScore({OpLeft, LastLane}, {OpRight, Lane});
1199           if (Score > BestOp.Score) {
1200             BestOp.Idx = Idx;
1201             BestOp.Score = Score;
1202           }
1203           break;
1204         }
1205         case ReorderingMode::Splat:
1206           if (Op == OpLastLane)
1207             BestOp.Idx = Idx;
1208           break;
1209         case ReorderingMode::Failed:
1210           return None;
1211         }
1212       }
1213 
1214       if (BestOp.Idx) {
1215         getData(BestOp.Idx.getValue(), Lane).IsUsed = true;
1216         return BestOp.Idx;
1217       }
1218       // If we could not find a good match return None.
1219       return None;
1220     }
1221 
1222     /// Helper for reorderOperandVecs. \Returns the lane that we should start
1223     /// reordering from. This is the one which has the least number of operands
1224     /// that can freely move about.
1225     unsigned getBestLaneToStartReordering() const {
1226       unsigned BestLane = 0;
1227       unsigned Min = UINT_MAX;
1228       for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
1229            ++Lane) {
1230         unsigned NumFreeOps = getMaxNumOperandsThatCanBeReordered(Lane);
1231         if (NumFreeOps < Min) {
1232           Min = NumFreeOps;
1233           BestLane = Lane;
1234         }
1235       }
1236       return BestLane;
1237     }
1238 
1239     /// \Returns the maximum number of operands that are allowed to be reordered
1240     /// for \p Lane. This is used as a heuristic for selecting the first lane to
1241     /// start operand reordering.
1242     unsigned getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
1243       unsigned CntTrue = 0;
1244       unsigned NumOperands = getNumOperands();
1245       // Operands with the same APO can be reordered. We therefore need to count
1246       // how many of them we have for each APO, like this: Cnt[APO] = x.
1247       // Since we only have two APOs, namely true and false, we can avoid using
1248       // a map. Instead we can simply count the number of operands that
1249       // correspond to one of them (in this case the 'true' APO), and calculate
1250       // the other by subtracting it from the total number of operands.
1251       for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx)
1252         if (getData(OpIdx, Lane).APO)
1253           ++CntTrue;
1254       unsigned CntFalse = NumOperands - CntTrue;
1255       return std::max(CntTrue, CntFalse);
1256     }
1257 
1258     /// Go through the instructions in VL and append their operands.
1259     void appendOperandsOfVL(ArrayRef<Value *> VL) {
1260       assert(!VL.empty() && "Bad VL");
1261       assert((empty() || VL.size() == getNumLanes()) &&
1262              "Expected same number of lanes");
1263       assert(isa<Instruction>(VL[0]) && "Expected instruction");
1264       unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
1265       OpsVec.resize(NumOperands);
1266       unsigned NumLanes = VL.size();
1267       for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1268         OpsVec[OpIdx].resize(NumLanes);
1269         for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1270           assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
1271           // Our tree has just 3 nodes: the root and two operands.
1272           // It is therefore trivial to get the APO. We only need to check the
1273           // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
1274           // RHS operand. The LHS operand of both add and sub is never attached
1275           // to an inversese operation in the linearized form, therefore its APO
1276           // is false. The RHS is true only if VL[Lane] is an inverse operation.
1277 
1278           // Since operand reordering is performed on groups of commutative
1279           // operations or alternating sequences (e.g., +, -), we can safely
1280           // tell the inverse operations by checking commutativity.
1281           bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
1282           bool APO = (OpIdx == 0) ? false : IsInverseOperation;
1283           OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
1284                                  APO, false};
1285         }
1286       }
1287     }
1288 
1289     /// \returns the number of operands.
1290     unsigned getNumOperands() const { return OpsVec.size(); }
1291 
1292     /// \returns the number of lanes.
1293     unsigned getNumLanes() const { return OpsVec[0].size(); }
1294 
1295     /// \returns the operand value at \p OpIdx and \p Lane.
1296     Value *getValue(unsigned OpIdx, unsigned Lane) const {
1297       return getData(OpIdx, Lane).V;
1298     }
1299 
1300     /// \returns true if the data structure is empty.
1301     bool empty() const { return OpsVec.empty(); }
1302 
1303     /// Clears the data.
1304     void clear() { OpsVec.clear(); }
1305 
1306     /// \Returns true if there are enough operands identical to \p Op to fill
1307     /// the whole vector.
1308     /// Note: This modifies the 'IsUsed' flag, so a cleanUsed() must follow.
1309     bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
1310       bool OpAPO = getData(OpIdx, Lane).APO;
1311       for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
1312         if (Ln == Lane)
1313           continue;
1314         // This is set to true if we found a candidate for broadcast at Lane.
1315         bool FoundCandidate = false;
1316         for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
1317           OperandData &Data = getData(OpI, Ln);
1318           if (Data.APO != OpAPO || Data.IsUsed)
1319             continue;
1320           if (Data.V == Op) {
1321             FoundCandidate = true;
1322             Data.IsUsed = true;
1323             break;
1324           }
1325         }
1326         if (!FoundCandidate)
1327           return false;
1328       }
1329       return true;
1330     }
1331 
1332   public:
1333     /// Initialize with all the operands of the instruction vector \p RootVL.
1334     VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
1335                ScalarEvolution &SE, const BoUpSLP &R)
1336         : DL(DL), SE(SE), R(R) {
1337       // Append all the operands of RootVL.
1338       appendOperandsOfVL(RootVL);
1339     }
1340 
1341     /// \Returns a value vector with the operands across all lanes for the
1342     /// opearnd at \p OpIdx.
1343     ValueList getVL(unsigned OpIdx) const {
1344       ValueList OpVL(OpsVec[OpIdx].size());
1345       assert(OpsVec[OpIdx].size() == getNumLanes() &&
1346              "Expected same num of lanes across all operands");
1347       for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
1348         OpVL[Lane] = OpsVec[OpIdx][Lane].V;
1349       return OpVL;
1350     }
1351 
1352     // Performs operand reordering for 2 or more operands.
1353     // The original operands are in OrigOps[OpIdx][Lane].
1354     // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'.
1355     void reorder() {
1356       unsigned NumOperands = getNumOperands();
1357       unsigned NumLanes = getNumLanes();
1358       // Each operand has its own mode. We are using this mode to help us select
1359       // the instructions for each lane, so that they match best with the ones
1360       // we have selected so far.
1361       SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);
1362 
1363       // This is a greedy single-pass algorithm. We are going over each lane
1364       // once and deciding on the best order right away with no back-tracking.
1365       // However, in order to increase its effectiveness, we start with the lane
1366       // that has operands that can move the least. For example, given the
1367       // following lanes:
1368       //  Lane 0 : A[0] = B[0] + C[0]   // Visited 3rd
1369       //  Lane 1 : A[1] = C[1] - B[1]   // Visited 1st
1370       //  Lane 2 : A[2] = B[2] + C[2]   // Visited 2nd
1371       //  Lane 3 : A[3] = C[3] - B[3]   // Visited 4th
1372       // we will start at Lane 1, since the operands of the subtraction cannot
1373       // be reordered. Then we will visit the rest of the lanes in a circular
1374       // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3.
1375 
1376       // Find the first lane that we will start our search from.
1377       unsigned FirstLane = getBestLaneToStartReordering();
1378 
1379       // Initialize the modes.
1380       for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1381         Value *OpLane0 = getValue(OpIdx, FirstLane);
1382         // Keep track if we have instructions with all the same opcode on one
1383         // side.
1384         if (isa<LoadInst>(OpLane0))
1385           ReorderingModes[OpIdx] = ReorderingMode::Load;
1386         else if (isa<Instruction>(OpLane0)) {
1387           // Check if OpLane0 should be broadcast.
1388           if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
1389             ReorderingModes[OpIdx] = ReorderingMode::Splat;
1390           else
1391             ReorderingModes[OpIdx] = ReorderingMode::Opcode;
1392         }
1393         else if (isa<Constant>(OpLane0))
1394           ReorderingModes[OpIdx] = ReorderingMode::Constant;
1395         else if (isa<Argument>(OpLane0))
1396           // Our best hope is a Splat. It may save some cost in some cases.
1397           ReorderingModes[OpIdx] = ReorderingMode::Splat;
1398         else
1399           // NOTE: This should be unreachable.
1400           ReorderingModes[OpIdx] = ReorderingMode::Failed;
1401       }
1402 
1403       // If the initial strategy fails for any of the operand indexes, then we
1404       // perform reordering again in a second pass. This helps avoid assigning
1405       // high priority to the failed strategy, and should improve reordering for
1406       // the non-failed operand indexes.
1407       for (int Pass = 0; Pass != 2; ++Pass) {
1408         // Skip the second pass if the first pass did not fail.
1409         bool StrategyFailed = false;
1410         // Mark all operand data as free to use.
1411         clearUsed();
1412         // We keep the original operand order for the FirstLane, so reorder the
1413         // rest of the lanes. We are visiting the nodes in a circular fashion,
1414         // using FirstLane as the center point and increasing the radius
1415         // distance.
1416         for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
1417           // Visit the lane on the right and then the lane on the left.
1418           for (int Direction : {+1, -1}) {
1419             int Lane = FirstLane + Direction * Distance;
1420             if (Lane < 0 || Lane >= (int)NumLanes)
1421               continue;
1422             int LastLane = Lane - Direction;
1423             assert(LastLane >= 0 && LastLane < (int)NumLanes &&
1424                    "Out of bounds");
1425             // Look for a good match for each operand.
1426             for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1427               // Search for the operand that matches SortedOps[OpIdx][Lane-1].
1428               Optional<unsigned> BestIdx =
1429                   getBestOperand(OpIdx, Lane, LastLane, ReorderingModes);
1430               // By not selecting a value, we allow the operands that follow to
1431               // select a better matching value. We will get a non-null value in
1432               // the next run of getBestOperand().
1433               if (BestIdx) {
1434                 // Swap the current operand with the one returned by
1435                 // getBestOperand().
1436                 swap(OpIdx, BestIdx.getValue(), Lane);
1437               } else {
1438                 // We failed to find a best operand, set mode to 'Failed'.
1439                 ReorderingModes[OpIdx] = ReorderingMode::Failed;
1440                 // Enable the second pass.
1441                 StrategyFailed = true;
1442               }
1443             }
1444           }
1445         }
1446         // Skip second pass if the strategy did not fail.
1447         if (!StrategyFailed)
1448           break;
1449       }
1450     }
1451 
1452 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1453     LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
1454       switch (RMode) {
1455       case ReorderingMode::Load:
1456         return "Load";
1457       case ReorderingMode::Opcode:
1458         return "Opcode";
1459       case ReorderingMode::Constant:
1460         return "Constant";
1461       case ReorderingMode::Splat:
1462         return "Splat";
1463       case ReorderingMode::Failed:
1464         return "Failed";
1465       }
1466       llvm_unreachable("Unimplemented Reordering Type");
1467     }
1468 
1469     LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
1470                                                    raw_ostream &OS) {
1471       return OS << getModeStr(RMode);
1472     }
1473 
1474     /// Debug print.
1475     LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
1476       printMode(RMode, dbgs());
1477     }
1478 
1479     friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
1480       return printMode(RMode, OS);
1481     }
1482 
1483     LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
1484       const unsigned Indent = 2;
1485       unsigned Cnt = 0;
1486       for (const OperandDataVec &OpDataVec : OpsVec) {
1487         OS << "Operand " << Cnt++ << "\n";
1488         for (const OperandData &OpData : OpDataVec) {
1489           OS.indent(Indent) << "{";
1490           if (Value *V = OpData.V)
1491             OS << *V;
1492           else
1493             OS << "null";
1494           OS << ", APO:" << OpData.APO << "}\n";
1495         }
1496         OS << "\n";
1497       }
1498       return OS;
1499     }
1500 
1501     /// Debug print.
1502     LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
1503 #endif
1504   };
1505 
1506   /// Checks if the instruction is marked for deletion.
1507   bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); }
1508 
1509   /// Marks values operands for later deletion by replacing them with Undefs.
1510   void eraseInstructions(ArrayRef<Value *> AV);
1511 
1512   ~BoUpSLP();
1513 
1514 private:
1515   /// Checks if all users of \p I are the part of the vectorization tree.
1516   bool areAllUsersVectorized(Instruction *I) const;
1517 
1518   /// \returns the cost of the vectorizable entry.
1519   InstructionCost getEntryCost(TreeEntry *E);
1520 
1521   /// This is the recursive part of buildTree.
1522   void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
1523                      const EdgeInfo &EI);
1524 
1525   /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
1526   /// be vectorized to use the original vector (or aggregate "bitcast" to a
1527   /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
1528   /// returns false, setting \p CurrentOrder to either an empty vector or a
1529   /// non-identity permutation that allows to reuse extract instructions.
1530   bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
1531                        SmallVectorImpl<unsigned> &CurrentOrder) const;
1532 
1533   /// Vectorize a single entry in the tree.
1534   Value *vectorizeTree(TreeEntry *E);
1535 
1536   /// Vectorize a single entry in the tree, starting in \p VL.
1537   Value *vectorizeTree(ArrayRef<Value *> VL);
1538 
1539   /// \returns the scalarization cost for this type. Scalarization in this
1540   /// context means the creation of vectors from a group of scalars.
1541   InstructionCost
1542   getGatherCost(FixedVectorType *Ty,
1543                 const DenseSet<unsigned> &ShuffledIndices) const;
1544 
1545   /// \returns the scalarization cost for this list of values. Assuming that
1546   /// this subtree gets vectorized, we may need to extract the values from the
1547   /// roots. This method calculates the cost of extracting the values.
1548   InstructionCost getGatherCost(ArrayRef<Value *> VL) const;
1549 
1550   /// Set the Builder insert point to one after the last instruction in
1551   /// the bundle
1552   void setInsertPointAfterBundle(TreeEntry *E);
1553 
1554   /// \returns a vector from a collection of scalars in \p VL.
1555   Value *gather(ArrayRef<Value *> VL);
1556 
1557   /// \returns whether the VectorizableTree is fully vectorizable and will
1558   /// be beneficial even the tree height is tiny.
1559   bool isFullyVectorizableTinyTree() const;
1560 
1561   /// Reorder commutative or alt operands to get better probability of
1562   /// generating vectorized code.
1563   static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
1564                                              SmallVectorImpl<Value *> &Left,
1565                                              SmallVectorImpl<Value *> &Right,
1566                                              const DataLayout &DL,
1567                                              ScalarEvolution &SE,
1568                                              const BoUpSLP &R);
1569   struct TreeEntry {
1570     using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
1571     TreeEntry(VecTreeTy &Container) : Container(Container) {}
1572 
1573     /// \returns true if the scalars in VL are equal to this entry.
1574     bool isSame(ArrayRef<Value *> VL) const {
1575       if (VL.size() == Scalars.size())
1576         return std::equal(VL.begin(), VL.end(), Scalars.begin());
1577       return VL.size() == ReuseShuffleIndices.size() &&
1578              std::equal(
1579                  VL.begin(), VL.end(), ReuseShuffleIndices.begin(),
1580                  [this](Value *V, int Idx) { return V == Scalars[Idx]; });
1581     }
1582 
1583     /// A vector of scalars.
1584     ValueList Scalars;
1585 
1586     /// The Scalars are vectorized into this value. It is initialized to Null.
1587     Value *VectorizedValue = nullptr;
1588 
1589     /// Do we need to gather this sequence or vectorize it
1590     /// (either with vector instruction or with scatter/gather
1591     /// intrinsics for store/load)?
1592     enum EntryState { Vectorize, ScatterVectorize, NeedToGather };
1593     EntryState State;
1594 
1595     /// Does this sequence require some shuffling?
1596     SmallVector<int, 4> ReuseShuffleIndices;
1597 
1598     /// Does this entry require reordering?
1599     SmallVector<unsigned, 4> ReorderIndices;
1600 
1601     /// Points back to the VectorizableTree.
1602     ///
1603     /// Only used for Graphviz right now.  Unfortunately GraphTrait::NodeRef has
1604     /// to be a pointer and needs to be able to initialize the child iterator.
1605     /// Thus we need a reference back to the container to translate the indices
1606     /// to entries.
1607     VecTreeTy &Container;
1608 
1609     /// The TreeEntry index containing the user of this entry.  We can actually
1610     /// have multiple users so the data structure is not truly a tree.
1611     SmallVector<EdgeInfo, 1> UserTreeIndices;
1612 
1613     /// The index of this treeEntry in VectorizableTree.
1614     int Idx = -1;
1615 
1616   private:
1617     /// The operands of each instruction in each lane Operands[op_index][lane].
1618     /// Note: This helps avoid the replication of the code that performs the
1619     /// reordering of operands during buildTree_rec() and vectorizeTree().
1620     SmallVector<ValueList, 2> Operands;
1621 
1622     /// The main/alternate instruction.
1623     Instruction *MainOp = nullptr;
1624     Instruction *AltOp = nullptr;
1625 
1626   public:
1627     /// Set this bundle's \p OpIdx'th operand to \p OpVL.
1628     void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) {
1629       if (Operands.size() < OpIdx + 1)
1630         Operands.resize(OpIdx + 1);
1631       assert(Operands[OpIdx].size() == 0 && "Already resized?");
1632       Operands[OpIdx].resize(Scalars.size());
1633       for (unsigned Lane = 0, E = Scalars.size(); Lane != E; ++Lane)
1634         Operands[OpIdx][Lane] = OpVL[Lane];
1635     }
1636 
1637     /// Set the operands of this bundle in their original order.
1638     void setOperandsInOrder() {
1639       assert(Operands.empty() && "Already initialized?");
1640       auto *I0 = cast<Instruction>(Scalars[0]);
1641       Operands.resize(I0->getNumOperands());
1642       unsigned NumLanes = Scalars.size();
1643       for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands();
1644            OpIdx != NumOperands; ++OpIdx) {
1645         Operands[OpIdx].resize(NumLanes);
1646         for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1647           auto *I = cast<Instruction>(Scalars[Lane]);
1648           assert(I->getNumOperands() == NumOperands &&
1649                  "Expected same number of operands");
1650           Operands[OpIdx][Lane] = I->getOperand(OpIdx);
1651         }
1652       }
1653     }
1654 
1655     /// \returns the \p OpIdx operand of this TreeEntry.
1656     ValueList &getOperand(unsigned OpIdx) {
1657       assert(OpIdx < Operands.size() && "Off bounds");
1658       return Operands[OpIdx];
1659     }
1660 
1661     /// \returns the number of operands.
1662     unsigned getNumOperands() const { return Operands.size(); }
1663 
1664     /// \return the single \p OpIdx operand.
1665     Value *getSingleOperand(unsigned OpIdx) const {
1666       assert(OpIdx < Operands.size() && "Off bounds");
1667       assert(!Operands[OpIdx].empty() && "No operand available");
1668       return Operands[OpIdx][0];
1669     }
1670 
1671     /// Some of the instructions in the list have alternate opcodes.
1672     bool isAltShuffle() const {
1673       return getOpcode() != getAltOpcode();
1674     }
1675 
1676     bool isOpcodeOrAlt(Instruction *I) const {
1677       unsigned CheckedOpcode = I->getOpcode();
1678       return (getOpcode() == CheckedOpcode ||
1679               getAltOpcode() == CheckedOpcode);
1680     }
1681 
1682     /// Chooses the correct key for scheduling data. If \p Op has the same (or
1683     /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is
1684     /// \p OpValue.
1685     Value *isOneOf(Value *Op) const {
1686       auto *I = dyn_cast<Instruction>(Op);
1687       if (I && isOpcodeOrAlt(I))
1688         return Op;
1689       return MainOp;
1690     }
1691 
1692     void setOperations(const InstructionsState &S) {
1693       MainOp = S.MainOp;
1694       AltOp = S.AltOp;
1695     }
1696 
1697     Instruction *getMainOp() const {
1698       return MainOp;
1699     }
1700 
1701     Instruction *getAltOp() const {
1702       return AltOp;
1703     }
1704 
1705     /// The main/alternate opcodes for the list of instructions.
1706     unsigned getOpcode() const {
1707       return MainOp ? MainOp->getOpcode() : 0;
1708     }
1709 
1710     unsigned getAltOpcode() const {
1711       return AltOp ? AltOp->getOpcode() : 0;
1712     }
1713 
1714     /// Update operations state of this entry if reorder occurred.
1715     bool updateStateIfReorder() {
1716       if (ReorderIndices.empty())
1717         return false;
1718       InstructionsState S = getSameOpcode(Scalars, ReorderIndices.front());
1719       setOperations(S);
1720       return true;
1721     }
1722 
1723 #ifndef NDEBUG
1724     /// Debug printer.
1725     LLVM_DUMP_METHOD void dump() const {
1726       dbgs() << Idx << ".\n";
1727       for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) {
1728         dbgs() << "Operand " << OpI << ":\n";
1729         for (const Value *V : Operands[OpI])
1730           dbgs().indent(2) << *V << "\n";
1731       }
1732       dbgs() << "Scalars: \n";
1733       for (Value *V : Scalars)
1734         dbgs().indent(2) << *V << "\n";
1735       dbgs() << "State: ";
1736       switch (State) {
1737       case Vectorize:
1738         dbgs() << "Vectorize\n";
1739         break;
1740       case ScatterVectorize:
1741         dbgs() << "ScatterVectorize\n";
1742         break;
1743       case NeedToGather:
1744         dbgs() << "NeedToGather\n";
1745         break;
1746       }
1747       dbgs() << "MainOp: ";
1748       if (MainOp)
1749         dbgs() << *MainOp << "\n";
1750       else
1751         dbgs() << "NULL\n";
1752       dbgs() << "AltOp: ";
1753       if (AltOp)
1754         dbgs() << *AltOp << "\n";
1755       else
1756         dbgs() << "NULL\n";
1757       dbgs() << "VectorizedValue: ";
1758       if (VectorizedValue)
1759         dbgs() << *VectorizedValue << "\n";
1760       else
1761         dbgs() << "NULL\n";
1762       dbgs() << "ReuseShuffleIndices: ";
1763       if (ReuseShuffleIndices.empty())
1764         dbgs() << "Empty";
1765       else
1766         for (unsigned ReuseIdx : ReuseShuffleIndices)
1767           dbgs() << ReuseIdx << ", ";
1768       dbgs() << "\n";
1769       dbgs() << "ReorderIndices: ";
1770       for (unsigned ReorderIdx : ReorderIndices)
1771         dbgs() << ReorderIdx << ", ";
1772       dbgs() << "\n";
1773       dbgs() << "UserTreeIndices: ";
1774       for (const auto &EInfo : UserTreeIndices)
1775         dbgs() << EInfo << ", ";
1776       dbgs() << "\n";
1777     }
1778 #endif
1779   };
1780 
1781 #ifndef NDEBUG
1782   void dumpTreeCosts(TreeEntry *E, InstructionCost ReuseShuffleCost,
1783                      InstructionCost VecCost,
1784                      InstructionCost ScalarCost) const {
1785     dbgs() << "SLP: Calculated costs for Tree:\n"; E->dump();
1786     dbgs() << "SLP: Costs:\n";
1787     dbgs() << "SLP:     ReuseShuffleCost = " << ReuseShuffleCost << "\n";
1788     dbgs() << "SLP:     VectorCost = " << VecCost << "\n";
1789     dbgs() << "SLP:     ScalarCost = " << ScalarCost << "\n";
1790     dbgs() << "SLP:     ReuseShuffleCost + VecCost - ScalarCost = " <<
1791                ReuseShuffleCost + VecCost - ScalarCost << "\n";
1792   }
1793 #endif
1794 
1795   /// Create a new VectorizableTree entry.
1796   TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle,
1797                           const InstructionsState &S,
1798                           const EdgeInfo &UserTreeIdx,
1799                           ArrayRef<unsigned> ReuseShuffleIndices = None,
1800                           ArrayRef<unsigned> ReorderIndices = None) {
1801     TreeEntry::EntryState EntryState =
1802         Bundle ? TreeEntry::Vectorize : TreeEntry::NeedToGather;
1803     return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx,
1804                         ReuseShuffleIndices, ReorderIndices);
1805   }
1806 
1807   TreeEntry *newTreeEntry(ArrayRef<Value *> VL,
1808                           TreeEntry::EntryState EntryState,
1809                           Optional<ScheduleData *> Bundle,
1810                           const InstructionsState &S,
1811                           const EdgeInfo &UserTreeIdx,
1812                           ArrayRef<unsigned> ReuseShuffleIndices = None,
1813                           ArrayRef<unsigned> ReorderIndices = None) {
1814     assert(((!Bundle && EntryState == TreeEntry::NeedToGather) ||
1815             (Bundle && EntryState != TreeEntry::NeedToGather)) &&
1816            "Need to vectorize gather entry?");
1817     VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree));
1818     TreeEntry *Last = VectorizableTree.back().get();
1819     Last->Idx = VectorizableTree.size() - 1;
1820     Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
1821     Last->State = EntryState;
1822     Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(),
1823                                      ReuseShuffleIndices.end());
1824     Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end());
1825     Last->setOperations(S);
1826     if (Last->State != TreeEntry::NeedToGather) {
1827       for (Value *V : VL) {
1828         assert(!getTreeEntry(V) && "Scalar already in tree!");
1829         ScalarToTreeEntry[V] = Last;
1830       }
1831       // Update the scheduler bundle to point to this TreeEntry.
1832       unsigned Lane = 0;
1833       for (ScheduleData *BundleMember = Bundle.getValue(); BundleMember;
1834            BundleMember = BundleMember->NextInBundle) {
1835         BundleMember->TE = Last;
1836         BundleMember->Lane = Lane;
1837         ++Lane;
1838       }
1839       assert((!Bundle.getValue() || Lane == VL.size()) &&
1840              "Bundle and VL out of sync");
1841     } else {
1842       MustGather.insert(VL.begin(), VL.end());
1843     }
1844 
1845     if (UserTreeIdx.UserTE)
1846       Last->UserTreeIndices.push_back(UserTreeIdx);
1847 
1848     return Last;
1849   }
1850 
1851   /// -- Vectorization State --
1852   /// Holds all of the tree entries.
1853   TreeEntry::VecTreeTy VectorizableTree;
1854 
1855 #ifndef NDEBUG
1856   /// Debug printer.
1857   LLVM_DUMP_METHOD void dumpVectorizableTree() const {
1858     for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) {
1859       VectorizableTree[Id]->dump();
1860       dbgs() << "\n";
1861     }
1862   }
1863 #endif
1864 
1865   TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); }
1866 
1867   const TreeEntry *getTreeEntry(Value *V) const {
1868     return ScalarToTreeEntry.lookup(V);
1869   }
1870 
1871   /// Maps a specific scalar to its tree entry.
1872   SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry;
1873 
1874   /// Maps a value to the proposed vectorizable size.
1875   SmallDenseMap<Value *, unsigned> InstrElementSize;
1876 
1877   /// A list of scalars that we found that we need to keep as scalars.
1878   ValueSet MustGather;
1879 
1880   /// This POD struct describes one external user in the vectorized tree.
1881   struct ExternalUser {
1882     ExternalUser(Value *S, llvm::User *U, int L)
1883         : Scalar(S), User(U), Lane(L) {}
1884 
1885     // Which scalar in our function.
1886     Value *Scalar;
1887 
1888     // Which user that uses the scalar.
1889     llvm::User *User;
1890 
1891     // Which lane does the scalar belong to.
1892     int Lane;
1893   };
1894   using UserList = SmallVector<ExternalUser, 16>;
1895 
1896   /// Checks if two instructions may access the same memory.
1897   ///
1898   /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
1899   /// is invariant in the calling loop.
1900   bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
1901                  Instruction *Inst2) {
1902     // First check if the result is already in the cache.
1903     AliasCacheKey key = std::make_pair(Inst1, Inst2);
1904     Optional<bool> &result = AliasCache[key];
1905     if (result.hasValue()) {
1906       return result.getValue();
1907     }
1908     MemoryLocation Loc2 = getLocation(Inst2, AA);
1909     bool aliased = true;
1910     if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
1911       // Do the alias check.
1912       aliased = AA->alias(Loc1, Loc2);
1913     }
1914     // Store the result in the cache.
1915     result = aliased;
1916     return aliased;
1917   }
1918 
1919   using AliasCacheKey = std::pair<Instruction *, Instruction *>;
1920 
1921   /// Cache for alias results.
1922   /// TODO: consider moving this to the AliasAnalysis itself.
1923   DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
1924 
1925   /// Removes an instruction from its block and eventually deletes it.
1926   /// It's like Instruction::eraseFromParent() except that the actual deletion
1927   /// is delayed until BoUpSLP is destructed.
1928   /// This is required to ensure that there are no incorrect collisions in the
1929   /// AliasCache, which can happen if a new instruction is allocated at the
1930   /// same address as a previously deleted instruction.
1931   void eraseInstruction(Instruction *I, bool ReplaceOpsWithUndef = false) {
1932     auto It = DeletedInstructions.try_emplace(I, ReplaceOpsWithUndef).first;
1933     It->getSecond() = It->getSecond() && ReplaceOpsWithUndef;
1934   }
1935 
1936   /// Temporary store for deleted instructions. Instructions will be deleted
1937   /// eventually when the BoUpSLP is destructed.
1938   DenseMap<Instruction *, bool> DeletedInstructions;
1939 
1940   /// A list of values that need to extracted out of the tree.
1941   /// This list holds pairs of (Internal Scalar : External User). External User
1942   /// can be nullptr, it means that this Internal Scalar will be used later,
1943   /// after vectorization.
1944   UserList ExternalUses;
1945 
1946   /// Values used only by @llvm.assume calls.
1947   SmallPtrSet<const Value *, 32> EphValues;
1948 
1949   /// Holds all of the instructions that we gathered.
1950   SetVector<Instruction *> GatherSeq;
1951 
1952   /// A list of blocks that we are going to CSE.
1953   SetVector<BasicBlock *> CSEBlocks;
1954 
1955   /// Contains all scheduling relevant data for an instruction.
1956   /// A ScheduleData either represents a single instruction or a member of an
1957   /// instruction bundle (= a group of instructions which is combined into a
1958   /// vector instruction).
1959   struct ScheduleData {
1960     // The initial value for the dependency counters. It means that the
1961     // dependencies are not calculated yet.
1962     enum { InvalidDeps = -1 };
1963 
1964     ScheduleData() = default;
1965 
1966     void init(int BlockSchedulingRegionID, Value *OpVal) {
1967       FirstInBundle = this;
1968       NextInBundle = nullptr;
1969       NextLoadStore = nullptr;
1970       IsScheduled = false;
1971       SchedulingRegionID = BlockSchedulingRegionID;
1972       UnscheduledDepsInBundle = UnscheduledDeps;
1973       clearDependencies();
1974       OpValue = OpVal;
1975       TE = nullptr;
1976       Lane = -1;
1977     }
1978 
1979     /// Returns true if the dependency information has been calculated.
1980     bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
1981 
1982     /// Returns true for single instructions and for bundle representatives
1983     /// (= the head of a bundle).
1984     bool isSchedulingEntity() const { return FirstInBundle == this; }
1985 
1986     /// Returns true if it represents an instruction bundle and not only a
1987     /// single instruction.
1988     bool isPartOfBundle() const {
1989       return NextInBundle != nullptr || FirstInBundle != this;
1990     }
1991 
1992     /// Returns true if it is ready for scheduling, i.e. it has no more
1993     /// unscheduled depending instructions/bundles.
1994     bool isReady() const {
1995       assert(isSchedulingEntity() &&
1996              "can't consider non-scheduling entity for ready list");
1997       return UnscheduledDepsInBundle == 0 && !IsScheduled;
1998     }
1999 
2000     /// Modifies the number of unscheduled dependencies, also updating it for
2001     /// the whole bundle.
2002     int incrementUnscheduledDeps(int Incr) {
2003       UnscheduledDeps += Incr;
2004       return FirstInBundle->UnscheduledDepsInBundle += Incr;
2005     }
2006 
2007     /// Sets the number of unscheduled dependencies to the number of
2008     /// dependencies.
2009     void resetUnscheduledDeps() {
2010       incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
2011     }
2012 
2013     /// Clears all dependency information.
2014     void clearDependencies() {
2015       Dependencies = InvalidDeps;
2016       resetUnscheduledDeps();
2017       MemoryDependencies.clear();
2018     }
2019 
2020     void dump(raw_ostream &os) const {
2021       if (!isSchedulingEntity()) {
2022         os << "/ " << *Inst;
2023       } else if (NextInBundle) {
2024         os << '[' << *Inst;
2025         ScheduleData *SD = NextInBundle;
2026         while (SD) {
2027           os << ';' << *SD->Inst;
2028           SD = SD->NextInBundle;
2029         }
2030         os << ']';
2031       } else {
2032         os << *Inst;
2033       }
2034     }
2035 
2036     Instruction *Inst = nullptr;
2037 
2038     /// Points to the head in an instruction bundle (and always to this for
2039     /// single instructions).
2040     ScheduleData *FirstInBundle = nullptr;
2041 
2042     /// Single linked list of all instructions in a bundle. Null if it is a
2043     /// single instruction.
2044     ScheduleData *NextInBundle = nullptr;
2045 
2046     /// Single linked list of all memory instructions (e.g. load, store, call)
2047     /// in the block - until the end of the scheduling region.
2048     ScheduleData *NextLoadStore = nullptr;
2049 
2050     /// The dependent memory instructions.
2051     /// This list is derived on demand in calculateDependencies().
2052     SmallVector<ScheduleData *, 4> MemoryDependencies;
2053 
2054     /// This ScheduleData is in the current scheduling region if this matches
2055     /// the current SchedulingRegionID of BlockScheduling.
2056     int SchedulingRegionID = 0;
2057 
2058     /// Used for getting a "good" final ordering of instructions.
2059     int SchedulingPriority = 0;
2060 
2061     /// The number of dependencies. Constitutes of the number of users of the
2062     /// instruction plus the number of dependent memory instructions (if any).
2063     /// This value is calculated on demand.
2064     /// If InvalidDeps, the number of dependencies is not calculated yet.
2065     int Dependencies = InvalidDeps;
2066 
2067     /// The number of dependencies minus the number of dependencies of scheduled
2068     /// instructions. As soon as this is zero, the instruction/bundle gets ready
2069     /// for scheduling.
2070     /// Note that this is negative as long as Dependencies is not calculated.
2071     int UnscheduledDeps = InvalidDeps;
2072 
2073     /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for
2074     /// single instructions.
2075     int UnscheduledDepsInBundle = InvalidDeps;
2076 
2077     /// True if this instruction is scheduled (or considered as scheduled in the
2078     /// dry-run).
2079     bool IsScheduled = false;
2080 
2081     /// Opcode of the current instruction in the schedule data.
2082     Value *OpValue = nullptr;
2083 
2084     /// The TreeEntry that this instruction corresponds to.
2085     TreeEntry *TE = nullptr;
2086 
2087     /// The lane of this node in the TreeEntry.
2088     int Lane = -1;
2089   };
2090 
2091 #ifndef NDEBUG
2092   friend inline raw_ostream &operator<<(raw_ostream &os,
2093                                         const BoUpSLP::ScheduleData &SD) {
2094     SD.dump(os);
2095     return os;
2096   }
2097 #endif
2098 
2099   friend struct GraphTraits<BoUpSLP *>;
2100   friend struct DOTGraphTraits<BoUpSLP *>;
2101 
2102   /// Contains all scheduling data for a basic block.
2103   struct BlockScheduling {
2104     BlockScheduling(BasicBlock *BB)
2105         : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}
2106 
2107     void clear() {
2108       ReadyInsts.clear();
2109       ScheduleStart = nullptr;
2110       ScheduleEnd = nullptr;
2111       FirstLoadStoreInRegion = nullptr;
2112       LastLoadStoreInRegion = nullptr;
2113 
2114       // Reduce the maximum schedule region size by the size of the
2115       // previous scheduling run.
2116       ScheduleRegionSizeLimit -= ScheduleRegionSize;
2117       if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
2118         ScheduleRegionSizeLimit = MinScheduleRegionSize;
2119       ScheduleRegionSize = 0;
2120 
2121       // Make a new scheduling region, i.e. all existing ScheduleData is not
2122       // in the new region yet.
2123       ++SchedulingRegionID;
2124     }
2125 
2126     ScheduleData *getScheduleData(Value *V) {
2127       ScheduleData *SD = ScheduleDataMap[V];
2128       if (SD && SD->SchedulingRegionID == SchedulingRegionID)
2129         return SD;
2130       return nullptr;
2131     }
2132 
2133     ScheduleData *getScheduleData(Value *V, Value *Key) {
2134       if (V == Key)
2135         return getScheduleData(V);
2136       auto I = ExtraScheduleDataMap.find(V);
2137       if (I != ExtraScheduleDataMap.end()) {
2138         ScheduleData *SD = I->second[Key];
2139         if (SD && SD->SchedulingRegionID == SchedulingRegionID)
2140           return SD;
2141       }
2142       return nullptr;
2143     }
2144 
2145     bool isInSchedulingRegion(ScheduleData *SD) const {
2146       return SD->SchedulingRegionID == SchedulingRegionID;
2147     }
2148 
2149     /// Marks an instruction as scheduled and puts all dependent ready
2150     /// instructions into the ready-list.
2151     template <typename ReadyListType>
2152     void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
2153       SD->IsScheduled = true;
2154       LLVM_DEBUG(dbgs() << "SLP:   schedule " << *SD << "\n");
2155 
2156       ScheduleData *BundleMember = SD;
2157       while (BundleMember) {
2158         if (BundleMember->Inst != BundleMember->OpValue) {
2159           BundleMember = BundleMember->NextInBundle;
2160           continue;
2161         }
2162         // Handle the def-use chain dependencies.
2163 
2164         // Decrement the unscheduled counter and insert to ready list if ready.
2165         auto &&DecrUnsched = [this, &ReadyList](Instruction *I) {
2166           doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
2167             if (OpDef && OpDef->hasValidDependencies() &&
2168                 OpDef->incrementUnscheduledDeps(-1) == 0) {
2169               // There are no more unscheduled dependencies after
2170               // decrementing, so we can put the dependent instruction
2171               // into the ready list.
2172               ScheduleData *DepBundle = OpDef->FirstInBundle;
2173               assert(!DepBundle->IsScheduled &&
2174                      "already scheduled bundle gets ready");
2175               ReadyList.insert(DepBundle);
2176               LLVM_DEBUG(dbgs()
2177                          << "SLP:    gets ready (def): " << *DepBundle << "\n");
2178             }
2179           });
2180         };
2181 
2182         // If BundleMember is a vector bundle, its operands may have been
2183         // reordered duiring buildTree(). We therefore need to get its operands
2184         // through the TreeEntry.
2185         if (TreeEntry *TE = BundleMember->TE) {
2186           int Lane = BundleMember->Lane;
2187           assert(Lane >= 0 && "Lane not set");
2188 
2189           // Since vectorization tree is being built recursively this assertion
2190           // ensures that the tree entry has all operands set before reaching
2191           // this code. Couple of exceptions known at the moment are extracts
2192           // where their second (immediate) operand is not added. Since
2193           // immediates do not affect scheduler behavior this is considered
2194           // okay.
2195           auto *In = TE->getMainOp();
2196           assert(In &&
2197                  (isa<ExtractValueInst>(In) || isa<ExtractElementInst>(In) ||
2198                   In->getNumOperands() == TE->getNumOperands()) &&
2199                  "Missed TreeEntry operands?");
2200           (void)In; // fake use to avoid build failure when assertions disabled
2201 
2202           for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands();
2203                OpIdx != NumOperands; ++OpIdx)
2204             if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane]))
2205               DecrUnsched(I);
2206         } else {
2207           // If BundleMember is a stand-alone instruction, no operand reordering
2208           // has taken place, so we directly access its operands.
2209           for (Use &U : BundleMember->Inst->operands())
2210             if (auto *I = dyn_cast<Instruction>(U.get()))
2211               DecrUnsched(I);
2212         }
2213         // Handle the memory dependencies.
2214         for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
2215           if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
2216             // There are no more unscheduled dependencies after decrementing,
2217             // so we can put the dependent instruction into the ready list.
2218             ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
2219             assert(!DepBundle->IsScheduled &&
2220                    "already scheduled bundle gets ready");
2221             ReadyList.insert(DepBundle);
2222             LLVM_DEBUG(dbgs()
2223                        << "SLP:    gets ready (mem): " << *DepBundle << "\n");
2224           }
2225         }
2226         BundleMember = BundleMember->NextInBundle;
2227       }
2228     }
2229 
2230     void doForAllOpcodes(Value *V,
2231                          function_ref<void(ScheduleData *SD)> Action) {
2232       if (ScheduleData *SD = getScheduleData(V))
2233         Action(SD);
2234       auto I = ExtraScheduleDataMap.find(V);
2235       if (I != ExtraScheduleDataMap.end())
2236         for (auto &P : I->second)
2237           if (P.second->SchedulingRegionID == SchedulingRegionID)
2238             Action(P.second);
2239     }
2240 
2241     /// Put all instructions into the ReadyList which are ready for scheduling.
2242     template <typename ReadyListType>
2243     void initialFillReadyList(ReadyListType &ReadyList) {
2244       for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
2245         doForAllOpcodes(I, [&](ScheduleData *SD) {
2246           if (SD->isSchedulingEntity() && SD->isReady()) {
2247             ReadyList.insert(SD);
2248             LLVM_DEBUG(dbgs()
2249                        << "SLP:    initially in ready list: " << *I << "\n");
2250           }
2251         });
2252       }
2253     }
2254 
2255     /// Checks if a bundle of instructions can be scheduled, i.e. has no
2256     /// cyclic dependencies. This is only a dry-run, no instructions are
2257     /// actually moved at this stage.
2258     /// \returns the scheduling bundle. The returned Optional value is non-None
2259     /// if \p VL is allowed to be scheduled.
2260     Optional<ScheduleData *>
2261     tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
2262                       const InstructionsState &S);
2263 
2264     /// Un-bundles a group of instructions.
2265     void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);
2266 
2267     /// Allocates schedule data chunk.
2268     ScheduleData *allocateScheduleDataChunks();
2269 
2270     /// Extends the scheduling region so that V is inside the region.
2271     /// \returns true if the region size is within the limit.
2272     bool extendSchedulingRegion(Value *V, const InstructionsState &S);
2273 
2274     /// Initialize the ScheduleData structures for new instructions in the
2275     /// scheduling region.
2276     void initScheduleData(Instruction *FromI, Instruction *ToI,
2277                           ScheduleData *PrevLoadStore,
2278                           ScheduleData *NextLoadStore);
2279 
2280     /// Updates the dependency information of a bundle and of all instructions/
2281     /// bundles which depend on the original bundle.
2282     void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
2283                                BoUpSLP *SLP);
2284 
2285     /// Sets all instruction in the scheduling region to un-scheduled.
2286     void resetSchedule();
2287 
2288     BasicBlock *BB;
2289 
2290     /// Simple memory allocation for ScheduleData.
2291     std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
2292 
2293     /// The size of a ScheduleData array in ScheduleDataChunks.
2294     int ChunkSize;
2295 
2296     /// The allocator position in the current chunk, which is the last entry
2297     /// of ScheduleDataChunks.
2298     int ChunkPos;
2299 
2300     /// Attaches ScheduleData to Instruction.
2301     /// Note that the mapping survives during all vectorization iterations, i.e.
2302     /// ScheduleData structures are recycled.
2303     DenseMap<Value *, ScheduleData *> ScheduleDataMap;
2304 
2305     /// Attaches ScheduleData to Instruction with the leading key.
2306     DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>>
2307         ExtraScheduleDataMap;
2308 
2309     struct ReadyList : SmallVector<ScheduleData *, 8> {
2310       void insert(ScheduleData *SD) { push_back(SD); }
2311     };
2312 
2313     /// The ready-list for scheduling (only used for the dry-run).
2314     ReadyList ReadyInsts;
2315 
2316     /// The first instruction of the scheduling region.
2317     Instruction *ScheduleStart = nullptr;
2318 
2319     /// The first instruction _after_ the scheduling region.
2320     Instruction *ScheduleEnd = nullptr;
2321 
2322     /// The first memory accessing instruction in the scheduling region
2323     /// (can be null).
2324     ScheduleData *FirstLoadStoreInRegion = nullptr;
2325 
2326     /// The last memory accessing instruction in the scheduling region
2327     /// (can be null).
2328     ScheduleData *LastLoadStoreInRegion = nullptr;
2329 
2330     /// The current size of the scheduling region.
2331     int ScheduleRegionSize = 0;
2332 
2333     /// The maximum size allowed for the scheduling region.
2334     int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget;
2335 
2336     /// The ID of the scheduling region. For a new vectorization iteration this
2337     /// is incremented which "removes" all ScheduleData from the region.
2338     // Make sure that the initial SchedulingRegionID is greater than the
2339     // initial SchedulingRegionID in ScheduleData (which is 0).
2340     int SchedulingRegionID = 1;
2341   };
2342 
2343   /// Attaches the BlockScheduling structures to basic blocks.
2344   MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
2345 
2346   /// Performs the "real" scheduling. Done before vectorization is actually
2347   /// performed in a basic block.
2348   void scheduleBlock(BlockScheduling *BS);
2349 
2350   /// List of users to ignore during scheduling and that don't need extracting.
2351   ArrayRef<Value *> UserIgnoreList;
2352 
2353   /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of
2354   /// sorted SmallVectors of unsigned.
2355   struct OrdersTypeDenseMapInfo {
2356     static OrdersType getEmptyKey() {
2357       OrdersType V;
2358       V.push_back(~1U);
2359       return V;
2360     }
2361 
2362     static OrdersType getTombstoneKey() {
2363       OrdersType V;
2364       V.push_back(~2U);
2365       return V;
2366     }
2367 
2368     static unsigned getHashValue(const OrdersType &V) {
2369       return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
2370     }
2371 
2372     static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) {
2373       return LHS == RHS;
2374     }
2375   };
2376 
2377   /// Contains orders of operations along with the number of bundles that have
2378   /// operations in this order. It stores only those orders that require
2379   /// reordering, if reordering is not required it is counted using \a
2380   /// NumOpsWantToKeepOriginalOrder.
2381   DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo> NumOpsWantToKeepOrder;
2382   /// Number of bundles that do not require reordering.
2383   unsigned NumOpsWantToKeepOriginalOrder = 0;
2384 
2385   // Analysis and block reference.
2386   Function *F;
2387   ScalarEvolution *SE;
2388   TargetTransformInfo *TTI;
2389   TargetLibraryInfo *TLI;
2390   AAResults *AA;
2391   LoopInfo *LI;
2392   DominatorTree *DT;
2393   AssumptionCache *AC;
2394   DemandedBits *DB;
2395   const DataLayout *DL;
2396   OptimizationRemarkEmitter *ORE;
2397 
2398   unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
2399   unsigned MinVecRegSize; // Set by cl::opt (default: 128).
2400 
2401   /// Instruction builder to construct the vectorized tree.
2402   IRBuilder<> Builder;
2403 
2404   /// A map of scalar integer values to the smallest bit width with which they
2405   /// can legally be represented. The values map to (width, signed) pairs,
2406   /// where "width" indicates the minimum bit width and "signed" is True if the
2407   /// value must be signed-extended, rather than zero-extended, back to its
2408   /// original width.
2409   MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
2410 };
2411 
2412 } // end namespace slpvectorizer
2413 
2414 template <> struct GraphTraits<BoUpSLP *> {
2415   using TreeEntry = BoUpSLP::TreeEntry;
2416 
2417   /// NodeRef has to be a pointer per the GraphWriter.
2418   using NodeRef = TreeEntry *;
2419 
2420   using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy;
2421 
2422   /// Add the VectorizableTree to the index iterator to be able to return
2423   /// TreeEntry pointers.
2424   struct ChildIteratorType
2425       : public iterator_adaptor_base<
2426             ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> {
2427     ContainerTy &VectorizableTree;
2428 
2429     ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W,
2430                       ContainerTy &VT)
2431         : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}
2432 
2433     NodeRef operator*() { return I->UserTE; }
2434   };
2435 
2436   static NodeRef getEntryNode(BoUpSLP &R) {
2437     return R.VectorizableTree[0].get();
2438   }
2439 
2440   static ChildIteratorType child_begin(NodeRef N) {
2441     return {N->UserTreeIndices.begin(), N->Container};
2442   }
2443 
2444   static ChildIteratorType child_end(NodeRef N) {
2445     return {N->UserTreeIndices.end(), N->Container};
2446   }
2447 
2448   /// For the node iterator we just need to turn the TreeEntry iterator into a
2449   /// TreeEntry* iterator so that it dereferences to NodeRef.
2450   class nodes_iterator {
2451     using ItTy = ContainerTy::iterator;
2452     ItTy It;
2453 
2454   public:
2455     nodes_iterator(const ItTy &It2) : It(It2) {}
2456     NodeRef operator*() { return It->get(); }
2457     nodes_iterator operator++() {
2458       ++It;
2459       return *this;
2460     }
2461     bool operator!=(const nodes_iterator &N2) const { return N2.It != It; }
2462   };
2463 
2464   static nodes_iterator nodes_begin(BoUpSLP *R) {
2465     return nodes_iterator(R->VectorizableTree.begin());
2466   }
2467 
2468   static nodes_iterator nodes_end(BoUpSLP *R) {
2469     return nodes_iterator(R->VectorizableTree.end());
2470   }
2471 
2472   static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
2473 };
2474 
2475 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
2476   using TreeEntry = BoUpSLP::TreeEntry;
2477 
2478   DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
2479 
2480   std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
2481     std::string Str;
2482     raw_string_ostream OS(Str);
2483     if (isSplat(Entry->Scalars)) {
2484       OS << "<splat> " << *Entry->Scalars[0];
2485       return Str;
2486     }
2487     for (auto V : Entry->Scalars) {
2488       OS << *V;
2489       if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) {
2490             return EU.Scalar == V;
2491           }))
2492         OS << " <extract>";
2493       OS << "\n";
2494     }
2495     return Str;
2496   }
2497 
2498   static std::string getNodeAttributes(const TreeEntry *Entry,
2499                                        const BoUpSLP *) {
2500     if (Entry->State == TreeEntry::NeedToGather)
2501       return "color=red";
2502     return "";
2503   }
2504 };
2505 
2506 } // end namespace llvm
2507 
2508 BoUpSLP::~BoUpSLP() {
2509   for (const auto &Pair : DeletedInstructions) {
2510     // Replace operands of ignored instructions with Undefs in case if they were
2511     // marked for deletion.
2512     if (Pair.getSecond()) {
2513       Value *Undef = UndefValue::get(Pair.getFirst()->getType());
2514       Pair.getFirst()->replaceAllUsesWith(Undef);
2515     }
2516     Pair.getFirst()->dropAllReferences();
2517   }
2518   for (const auto &Pair : DeletedInstructions) {
2519     assert(Pair.getFirst()->use_empty() &&
2520            "trying to erase instruction with users.");
2521     Pair.getFirst()->eraseFromParent();
2522   }
2523 #ifdef EXPENSIVE_CHECKS
2524   // If we could guarantee that this call is not extremely slow, we could
2525   // remove the ifdef limitation (see PR47712).
2526   assert(!verifyFunction(*F, &dbgs()));
2527 #endif
2528 }
2529 
2530 void BoUpSLP::eraseInstructions(ArrayRef<Value *> AV) {
2531   for (auto *V : AV) {
2532     if (auto *I = dyn_cast<Instruction>(V))
2533       eraseInstruction(I, /*ReplaceOpsWithUndef=*/true);
2534   };
2535 }
2536 
2537 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
2538                         ArrayRef<Value *> UserIgnoreLst) {
2539   ExtraValueToDebugLocsMap ExternallyUsedValues;
2540   buildTree(Roots, ExternallyUsedValues, UserIgnoreLst);
2541 }
2542 
2543 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
2544                         ExtraValueToDebugLocsMap &ExternallyUsedValues,
2545                         ArrayRef<Value *> UserIgnoreLst) {
2546   deleteTree();
2547   UserIgnoreList = UserIgnoreLst;
2548   if (!allSameType(Roots))
2549     return;
2550   buildTree_rec(Roots, 0, EdgeInfo());
2551 
2552   // Collect the values that we need to extract from the tree.
2553   for (auto &TEPtr : VectorizableTree) {
2554     TreeEntry *Entry = TEPtr.get();
2555 
2556     // No need to handle users of gathered values.
2557     if (Entry->State == TreeEntry::NeedToGather)
2558       continue;
2559 
2560     // For each lane:
2561     for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
2562       Value *Scalar = Entry->Scalars[Lane];
2563       int FoundLane = Lane;
2564       if (!Entry->ReuseShuffleIndices.empty()) {
2565         FoundLane =
2566             std::distance(Entry->ReuseShuffleIndices.begin(),
2567                           llvm::find(Entry->ReuseShuffleIndices, FoundLane));
2568       }
2569 
2570       // Check if the scalar is externally used as an extra arg.
2571       auto ExtI = ExternallyUsedValues.find(Scalar);
2572       if (ExtI != ExternallyUsedValues.end()) {
2573         LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane "
2574                           << Lane << " from " << *Scalar << ".\n");
2575         ExternalUses.emplace_back(Scalar, nullptr, FoundLane);
2576       }
2577       for (User *U : Scalar->users()) {
2578         LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
2579 
2580         Instruction *UserInst = dyn_cast<Instruction>(U);
2581         if (!UserInst)
2582           continue;
2583 
2584         // Skip in-tree scalars that become vectors
2585         if (TreeEntry *UseEntry = getTreeEntry(U)) {
2586           Value *UseScalar = UseEntry->Scalars[0];
2587           // Some in-tree scalars will remain as scalar in vectorized
2588           // instructions. If that is the case, the one in Lane 0 will
2589           // be used.
2590           if (UseScalar != U ||
2591               UseEntry->State == TreeEntry::ScatterVectorize ||
2592               !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
2593             LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
2594                               << ".\n");
2595             assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state");
2596             continue;
2597           }
2598         }
2599 
2600         // Ignore users in the user ignore list.
2601         if (is_contained(UserIgnoreList, UserInst))
2602           continue;
2603 
2604         LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane "
2605                           << Lane << " from " << *Scalar << ".\n");
2606         ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane));
2607       }
2608     }
2609   }
2610 }
2611 
2612 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
2613                             const EdgeInfo &UserTreeIdx) {
2614   assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
2615 
2616   InstructionsState S = getSameOpcode(VL);
2617   if (Depth == RecursionMaxDepth) {
2618     LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
2619     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2620     return;
2621   }
2622 
2623   // Don't handle vectors.
2624   if (S.OpValue->getType()->isVectorTy()) {
2625     LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
2626     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2627     return;
2628   }
2629 
2630   if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
2631     if (SI->getValueOperand()->getType()->isVectorTy()) {
2632       LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
2633       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2634       return;
2635     }
2636 
2637   // If all of the operands are identical or constant we have a simple solution.
2638   if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode()) {
2639     LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
2640     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2641     return;
2642   }
2643 
2644   // We now know that this is a vector of instructions of the same type from
2645   // the same block.
2646 
2647   // Don't vectorize ephemeral values.
2648   for (Value *V : VL) {
2649     if (EphValues.count(V)) {
2650       LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
2651                         << ") is ephemeral.\n");
2652       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2653       return;
2654     }
2655   }
2656 
2657   // Check if this is a duplicate of another entry.
2658   if (TreeEntry *E = getTreeEntry(S.OpValue)) {
2659     LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n");
2660     if (!E->isSame(VL)) {
2661       LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
2662       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2663       return;
2664     }
2665     // Record the reuse of the tree node.  FIXME, currently this is only used to
2666     // properly draw the graph rather than for the actual vectorization.
2667     E->UserTreeIndices.push_back(UserTreeIdx);
2668     LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue
2669                       << ".\n");
2670     return;
2671   }
2672 
2673   // Check that none of the instructions in the bundle are already in the tree.
2674   for (Value *V : VL) {
2675     auto *I = dyn_cast<Instruction>(V);
2676     if (!I)
2677       continue;
2678     if (getTreeEntry(I)) {
2679       LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
2680                         << ") is already in tree.\n");
2681       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2682       return;
2683     }
2684   }
2685 
2686   // If any of the scalars is marked as a value that needs to stay scalar, then
2687   // we need to gather the scalars.
2688   // The reduction nodes (stored in UserIgnoreList) also should stay scalar.
2689   for (Value *V : VL) {
2690     if (MustGather.count(V) || is_contained(UserIgnoreList, V)) {
2691       LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
2692       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2693       return;
2694     }
2695   }
2696 
2697   // Check that all of the users of the scalars that we want to vectorize are
2698   // schedulable.
2699   auto *VL0 = cast<Instruction>(S.OpValue);
2700   BasicBlock *BB = VL0->getParent();
2701 
2702   if (!DT->isReachableFromEntry(BB)) {
2703     // Don't go into unreachable blocks. They may contain instructions with
2704     // dependency cycles which confuse the final scheduling.
2705     LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
2706     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2707     return;
2708   }
2709 
2710   // Check that every instruction appears once in this bundle.
2711   SmallVector<unsigned, 4> ReuseShuffleIndicies;
2712   SmallVector<Value *, 4> UniqueValues;
2713   DenseMap<Value *, unsigned> UniquePositions;
2714   for (Value *V : VL) {
2715     auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
2716     ReuseShuffleIndicies.emplace_back(Res.first->second);
2717     if (Res.second)
2718       UniqueValues.emplace_back(V);
2719   }
2720   size_t NumUniqueScalarValues = UniqueValues.size();
2721   if (NumUniqueScalarValues == VL.size()) {
2722     ReuseShuffleIndicies.clear();
2723   } else {
2724     LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n");
2725     if (NumUniqueScalarValues <= 1 ||
2726         !llvm::isPowerOf2_32(NumUniqueScalarValues)) {
2727       LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
2728       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2729       return;
2730     }
2731     VL = UniqueValues;
2732   }
2733 
2734   auto &BSRef = BlocksSchedules[BB];
2735   if (!BSRef)
2736     BSRef = std::make_unique<BlockScheduling>(BB);
2737 
2738   BlockScheduling &BS = *BSRef.get();
2739 
2740   Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S);
2741   if (!Bundle) {
2742     LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
2743     assert((!BS.getScheduleData(VL0) ||
2744             !BS.getScheduleData(VL0)->isPartOfBundle()) &&
2745            "tryScheduleBundle should cancelScheduling on failure");
2746     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2747                  ReuseShuffleIndicies);
2748     return;
2749   }
2750   LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
2751 
2752   unsigned ShuffleOrOp = S.isAltShuffle() ?
2753                 (unsigned) Instruction::ShuffleVector : S.getOpcode();
2754   switch (ShuffleOrOp) {
2755     case Instruction::PHI: {
2756       auto *PH = cast<PHINode>(VL0);
2757 
2758       // Check for terminator values (e.g. invoke).
2759       for (Value *V : VL)
2760         for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) {
2761           Instruction *Term = dyn_cast<Instruction>(
2762               cast<PHINode>(V)->getIncomingValueForBlock(
2763                   PH->getIncomingBlock(I)));
2764           if (Term && Term->isTerminator()) {
2765             LLVM_DEBUG(dbgs()
2766                        << "SLP: Need to swizzle PHINodes (terminator use).\n");
2767             BS.cancelScheduling(VL, VL0);
2768             newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2769                          ReuseShuffleIndicies);
2770             return;
2771           }
2772         }
2773 
2774       TreeEntry *TE =
2775           newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies);
2776       LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
2777 
2778       // Keeps the reordered operands to avoid code duplication.
2779       SmallVector<ValueList, 2> OperandsVec;
2780       for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) {
2781         ValueList Operands;
2782         // Prepare the operand vector.
2783         for (Value *V : VL)
2784           Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(
2785               PH->getIncomingBlock(I)));
2786         TE->setOperand(I, Operands);
2787         OperandsVec.push_back(Operands);
2788       }
2789       for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx)
2790         buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx});
2791       return;
2792     }
2793     case Instruction::ExtractValue:
2794     case Instruction::ExtractElement: {
2795       OrdersType CurrentOrder;
2796       bool Reuse = canReuseExtract(VL, VL0, CurrentOrder);
2797       if (Reuse) {
2798         LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
2799         ++NumOpsWantToKeepOriginalOrder;
2800         newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2801                      ReuseShuffleIndicies);
2802         // This is a special case, as it does not gather, but at the same time
2803         // we are not extending buildTree_rec() towards the operands.
2804         ValueList Op0;
2805         Op0.assign(VL.size(), VL0->getOperand(0));
2806         VectorizableTree.back()->setOperand(0, Op0);
2807         return;
2808       }
2809       if (!CurrentOrder.empty()) {
2810         LLVM_DEBUG({
2811           dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
2812                     "with order";
2813           for (unsigned Idx : CurrentOrder)
2814             dbgs() << " " << Idx;
2815           dbgs() << "\n";
2816         });
2817         // Insert new order with initial value 0, if it does not exist,
2818         // otherwise return the iterator to the existing one.
2819         newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2820                      ReuseShuffleIndicies, CurrentOrder);
2821         findRootOrder(CurrentOrder);
2822         ++NumOpsWantToKeepOrder[CurrentOrder];
2823         // This is a special case, as it does not gather, but at the same time
2824         // we are not extending buildTree_rec() towards the operands.
2825         ValueList Op0;
2826         Op0.assign(VL.size(), VL0->getOperand(0));
2827         VectorizableTree.back()->setOperand(0, Op0);
2828         return;
2829       }
2830       LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
2831       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2832                    ReuseShuffleIndicies);
2833       BS.cancelScheduling(VL, VL0);
2834       return;
2835     }
2836     case Instruction::Load: {
2837       // Check that a vectorized load would load the same memory as a scalar
2838       // load. For example, we don't want to vectorize loads that are smaller
2839       // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
2840       // treats loading/storing it as an i8 struct. If we vectorize loads/stores
2841       // from such a struct, we read/write packed bits disagreeing with the
2842       // unvectorized version.
2843       Type *ScalarTy = VL0->getType();
2844 
2845       if (DL->getTypeSizeInBits(ScalarTy) !=
2846           DL->getTypeAllocSizeInBits(ScalarTy)) {
2847         BS.cancelScheduling(VL, VL0);
2848         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2849                      ReuseShuffleIndicies);
2850         LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
2851         return;
2852       }
2853 
2854       // Make sure all loads in the bundle are simple - we can't vectorize
2855       // atomic or volatile loads.
2856       SmallVector<Value *, 4> PointerOps(VL.size());
2857       auto POIter = PointerOps.begin();
2858       for (Value *V : VL) {
2859         auto *L = cast<LoadInst>(V);
2860         if (!L->isSimple()) {
2861           BS.cancelScheduling(VL, VL0);
2862           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2863                        ReuseShuffleIndicies);
2864           LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
2865           return;
2866         }
2867         *POIter = L->getPointerOperand();
2868         ++POIter;
2869       }
2870 
2871       OrdersType CurrentOrder;
2872       // Check the order of pointer operands.
2873       if (llvm::sortPtrAccesses(PointerOps, *DL, *SE, CurrentOrder)) {
2874         Value *Ptr0;
2875         Value *PtrN;
2876         if (CurrentOrder.empty()) {
2877           Ptr0 = PointerOps.front();
2878           PtrN = PointerOps.back();
2879         } else {
2880           Ptr0 = PointerOps[CurrentOrder.front()];
2881           PtrN = PointerOps[CurrentOrder.back()];
2882         }
2883         Optional<int> Diff = getPointersDiff(Ptr0, PtrN, *DL, *SE);
2884         // Check that the sorted loads are consecutive.
2885         if (static_cast<unsigned>(*Diff) == VL.size() - 1) {
2886           if (CurrentOrder.empty()) {
2887             // Original loads are consecutive and does not require reordering.
2888             ++NumOpsWantToKeepOriginalOrder;
2889             TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
2890                                          UserTreeIdx, ReuseShuffleIndicies);
2891             TE->setOperandsInOrder();
2892             LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
2893           } else {
2894             // Need to reorder.
2895             TreeEntry *TE =
2896                 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2897                              ReuseShuffleIndicies, CurrentOrder);
2898             TE->setOperandsInOrder();
2899             LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
2900             findRootOrder(CurrentOrder);
2901             ++NumOpsWantToKeepOrder[CurrentOrder];
2902           }
2903           return;
2904         }
2905         // Vectorizing non-consecutive loads with `llvm.masked.gather`.
2906         TreeEntry *TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S,
2907                                      UserTreeIdx, ReuseShuffleIndicies);
2908         TE->setOperandsInOrder();
2909         buildTree_rec(PointerOps, Depth + 1, {TE, 0});
2910         LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n");
2911         return;
2912       }
2913 
2914       LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
2915       BS.cancelScheduling(VL, VL0);
2916       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2917                    ReuseShuffleIndicies);
2918       return;
2919     }
2920     case Instruction::ZExt:
2921     case Instruction::SExt:
2922     case Instruction::FPToUI:
2923     case Instruction::FPToSI:
2924     case Instruction::FPExt:
2925     case Instruction::PtrToInt:
2926     case Instruction::IntToPtr:
2927     case Instruction::SIToFP:
2928     case Instruction::UIToFP:
2929     case Instruction::Trunc:
2930     case Instruction::FPTrunc:
2931     case Instruction::BitCast: {
2932       Type *SrcTy = VL0->getOperand(0)->getType();
2933       for (Value *V : VL) {
2934         Type *Ty = cast<Instruction>(V)->getOperand(0)->getType();
2935         if (Ty != SrcTy || !isValidElementType(Ty)) {
2936           BS.cancelScheduling(VL, VL0);
2937           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2938                        ReuseShuffleIndicies);
2939           LLVM_DEBUG(dbgs()
2940                      << "SLP: Gathering casts with different src types.\n");
2941           return;
2942         }
2943       }
2944       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2945                                    ReuseShuffleIndicies);
2946       LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");
2947 
2948       TE->setOperandsInOrder();
2949       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
2950         ValueList Operands;
2951         // Prepare the operand vector.
2952         for (Value *V : VL)
2953           Operands.push_back(cast<Instruction>(V)->getOperand(i));
2954 
2955         buildTree_rec(Operands, Depth + 1, {TE, i});
2956       }
2957       return;
2958     }
2959     case Instruction::ICmp:
2960     case Instruction::FCmp: {
2961       // Check that all of the compares have the same predicate.
2962       CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
2963       CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0);
2964       Type *ComparedTy = VL0->getOperand(0)->getType();
2965       for (Value *V : VL) {
2966         CmpInst *Cmp = cast<CmpInst>(V);
2967         if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) ||
2968             Cmp->getOperand(0)->getType() != ComparedTy) {
2969           BS.cancelScheduling(VL, VL0);
2970           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2971                        ReuseShuffleIndicies);
2972           LLVM_DEBUG(dbgs()
2973                      << "SLP: Gathering cmp with different predicate.\n");
2974           return;
2975         }
2976       }
2977 
2978       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2979                                    ReuseShuffleIndicies);
2980       LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n");
2981 
2982       ValueList Left, Right;
2983       if (cast<CmpInst>(VL0)->isCommutative()) {
2984         // Commutative predicate - collect + sort operands of the instructions
2985         // so that each side is more likely to have the same opcode.
2986         assert(P0 == SwapP0 && "Commutative Predicate mismatch");
2987         reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
2988       } else {
2989         // Collect operands - commute if it uses the swapped predicate.
2990         for (Value *V : VL) {
2991           auto *Cmp = cast<CmpInst>(V);
2992           Value *LHS = Cmp->getOperand(0);
2993           Value *RHS = Cmp->getOperand(1);
2994           if (Cmp->getPredicate() != P0)
2995             std::swap(LHS, RHS);
2996           Left.push_back(LHS);
2997           Right.push_back(RHS);
2998         }
2999       }
3000       TE->setOperand(0, Left);
3001       TE->setOperand(1, Right);
3002       buildTree_rec(Left, Depth + 1, {TE, 0});
3003       buildTree_rec(Right, Depth + 1, {TE, 1});
3004       return;
3005     }
3006     case Instruction::Select:
3007     case Instruction::FNeg:
3008     case Instruction::Add:
3009     case Instruction::FAdd:
3010     case Instruction::Sub:
3011     case Instruction::FSub:
3012     case Instruction::Mul:
3013     case Instruction::FMul:
3014     case Instruction::UDiv:
3015     case Instruction::SDiv:
3016     case Instruction::FDiv:
3017     case Instruction::URem:
3018     case Instruction::SRem:
3019     case Instruction::FRem:
3020     case Instruction::Shl:
3021     case Instruction::LShr:
3022     case Instruction::AShr:
3023     case Instruction::And:
3024     case Instruction::Or:
3025     case Instruction::Xor: {
3026       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3027                                    ReuseShuffleIndicies);
3028       LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n");
3029 
3030       // Sort operands of the instructions so that each side is more likely to
3031       // have the same opcode.
3032       if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
3033         ValueList Left, Right;
3034         reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
3035         TE->setOperand(0, Left);
3036         TE->setOperand(1, Right);
3037         buildTree_rec(Left, Depth + 1, {TE, 0});
3038         buildTree_rec(Right, Depth + 1, {TE, 1});
3039         return;
3040       }
3041 
3042       TE->setOperandsInOrder();
3043       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
3044         ValueList Operands;
3045         // Prepare the operand vector.
3046         for (Value *V : VL)
3047           Operands.push_back(cast<Instruction>(V)->getOperand(i));
3048 
3049         buildTree_rec(Operands, Depth + 1, {TE, i});
3050       }
3051       return;
3052     }
3053     case Instruction::GetElementPtr: {
3054       // We don't combine GEPs with complicated (nested) indexing.
3055       for (Value *V : VL) {
3056         if (cast<Instruction>(V)->getNumOperands() != 2) {
3057           LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
3058           BS.cancelScheduling(VL, VL0);
3059           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3060                        ReuseShuffleIndicies);
3061           return;
3062         }
3063       }
3064 
3065       // We can't combine several GEPs into one vector if they operate on
3066       // different types.
3067       Type *Ty0 = VL0->getOperand(0)->getType();
3068       for (Value *V : VL) {
3069         Type *CurTy = cast<Instruction>(V)->getOperand(0)->getType();
3070         if (Ty0 != CurTy) {
3071           LLVM_DEBUG(dbgs()
3072                      << "SLP: not-vectorizable GEP (different types).\n");
3073           BS.cancelScheduling(VL, VL0);
3074           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3075                        ReuseShuffleIndicies);
3076           return;
3077         }
3078       }
3079 
3080       // We don't combine GEPs with non-constant indexes.
3081       Type *Ty1 = VL0->getOperand(1)->getType();
3082       for (Value *V : VL) {
3083         auto Op = cast<Instruction>(V)->getOperand(1);
3084         if (!isa<ConstantInt>(Op) ||
3085             (Op->getType() != Ty1 &&
3086              Op->getType()->getScalarSizeInBits() >
3087                  DL->getIndexSizeInBits(
3088                      V->getType()->getPointerAddressSpace()))) {
3089           LLVM_DEBUG(dbgs()
3090                      << "SLP: not-vectorizable GEP (non-constant indexes).\n");
3091           BS.cancelScheduling(VL, VL0);
3092           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3093                        ReuseShuffleIndicies);
3094           return;
3095         }
3096       }
3097 
3098       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3099                                    ReuseShuffleIndicies);
3100       LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
3101       TE->setOperandsInOrder();
3102       for (unsigned i = 0, e = 2; i < e; ++i) {
3103         ValueList Operands;
3104         // Prepare the operand vector.
3105         for (Value *V : VL)
3106           Operands.push_back(cast<Instruction>(V)->getOperand(i));
3107 
3108         buildTree_rec(Operands, Depth + 1, {TE, i});
3109       }
3110       return;
3111     }
3112     case Instruction::Store: {
3113       // Check if the stores are consecutive or if we need to swizzle them.
3114       llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType();
3115       // Avoid types that are padded when being allocated as scalars, while
3116       // being packed together in a vector (such as i1).
3117       if (DL->getTypeSizeInBits(ScalarTy) !=
3118           DL->getTypeAllocSizeInBits(ScalarTy)) {
3119         BS.cancelScheduling(VL, VL0);
3120         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3121                      ReuseShuffleIndicies);
3122         LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n");
3123         return;
3124       }
3125       // Make sure all stores in the bundle are simple - we can't vectorize
3126       // atomic or volatile stores.
3127       SmallVector<Value *, 4> PointerOps(VL.size());
3128       ValueList Operands(VL.size());
3129       auto POIter = PointerOps.begin();
3130       auto OIter = Operands.begin();
3131       for (Value *V : VL) {
3132         auto *SI = cast<StoreInst>(V);
3133         if (!SI->isSimple()) {
3134           BS.cancelScheduling(VL, VL0);
3135           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3136                        ReuseShuffleIndicies);
3137           LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n");
3138           return;
3139         }
3140         *POIter = SI->getPointerOperand();
3141         *OIter = SI->getValueOperand();
3142         ++POIter;
3143         ++OIter;
3144       }
3145 
3146       OrdersType CurrentOrder;
3147       // Check the order of pointer operands.
3148       if (llvm::sortPtrAccesses(PointerOps, *DL, *SE, CurrentOrder)) {
3149         Value *Ptr0;
3150         Value *PtrN;
3151         if (CurrentOrder.empty()) {
3152           Ptr0 = PointerOps.front();
3153           PtrN = PointerOps.back();
3154         } else {
3155           Ptr0 = PointerOps[CurrentOrder.front()];
3156           PtrN = PointerOps[CurrentOrder.back()];
3157         }
3158         Optional<int> Dist = getPointersDiff(Ptr0, PtrN, *DL, *SE);
3159         // Check that the sorted pointer operands are consecutive.
3160         if (static_cast<unsigned>(*Dist) == VL.size() - 1) {
3161           if (CurrentOrder.empty()) {
3162             // Original stores are consecutive and does not require reordering.
3163             ++NumOpsWantToKeepOriginalOrder;
3164             TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
3165                                          UserTreeIdx, ReuseShuffleIndicies);
3166             TE->setOperandsInOrder();
3167             buildTree_rec(Operands, Depth + 1, {TE, 0});
3168             LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
3169           } else {
3170             TreeEntry *TE =
3171                 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3172                              ReuseShuffleIndicies, CurrentOrder);
3173             TE->setOperandsInOrder();
3174             buildTree_rec(Operands, Depth + 1, {TE, 0});
3175             LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n");
3176             findRootOrder(CurrentOrder);
3177             ++NumOpsWantToKeepOrder[CurrentOrder];
3178           }
3179           return;
3180         }
3181       }
3182 
3183       BS.cancelScheduling(VL, VL0);
3184       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3185                    ReuseShuffleIndicies);
3186       LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
3187       return;
3188     }
3189     case Instruction::Call: {
3190       // Check if the calls are all to the same vectorizable intrinsic or
3191       // library function.
3192       CallInst *CI = cast<CallInst>(VL0);
3193       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3194 
3195       VFShape Shape = VFShape::get(
3196           *CI, ElementCount::getFixed(static_cast<unsigned int>(VL.size())),
3197           false /*HasGlobalPred*/);
3198       Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3199 
3200       if (!VecFunc && !isTriviallyVectorizable(ID)) {
3201         BS.cancelScheduling(VL, VL0);
3202         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3203                      ReuseShuffleIndicies);
3204         LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
3205         return;
3206       }
3207       Function *F = CI->getCalledFunction();
3208       unsigned NumArgs = CI->getNumArgOperands();
3209       SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr);
3210       for (unsigned j = 0; j != NumArgs; ++j)
3211         if (hasVectorInstrinsicScalarOpd(ID, j))
3212           ScalarArgs[j] = CI->getArgOperand(j);
3213       for (Value *V : VL) {
3214         CallInst *CI2 = dyn_cast<CallInst>(V);
3215         if (!CI2 || CI2->getCalledFunction() != F ||
3216             getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
3217             (VecFunc &&
3218              VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) ||
3219             !CI->hasIdenticalOperandBundleSchema(*CI2)) {
3220           BS.cancelScheduling(VL, VL0);
3221           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3222                        ReuseShuffleIndicies);
3223           LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V
3224                             << "\n");
3225           return;
3226         }
3227         // Some intrinsics have scalar arguments and should be same in order for
3228         // them to be vectorized.
3229         for (unsigned j = 0; j != NumArgs; ++j) {
3230           if (hasVectorInstrinsicScalarOpd(ID, j)) {
3231             Value *A1J = CI2->getArgOperand(j);
3232             if (ScalarArgs[j] != A1J) {
3233               BS.cancelScheduling(VL, VL0);
3234               newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3235                            ReuseShuffleIndicies);
3236               LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
3237                                 << " argument " << ScalarArgs[j] << "!=" << A1J
3238                                 << "\n");
3239               return;
3240             }
3241           }
3242         }
3243         // Verify that the bundle operands are identical between the two calls.
3244         if (CI->hasOperandBundles() &&
3245             !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
3246                         CI->op_begin() + CI->getBundleOperandsEndIndex(),
3247                         CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
3248           BS.cancelScheduling(VL, VL0);
3249           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3250                        ReuseShuffleIndicies);
3251           LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
3252                             << *CI << "!=" << *V << '\n');
3253           return;
3254         }
3255       }
3256 
3257       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3258                                    ReuseShuffleIndicies);
3259       TE->setOperandsInOrder();
3260       for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
3261         ValueList Operands;
3262         // Prepare the operand vector.
3263         for (Value *V : VL) {
3264           auto *CI2 = cast<CallInst>(V);
3265           Operands.push_back(CI2->getArgOperand(i));
3266         }
3267         buildTree_rec(Operands, Depth + 1, {TE, i});
3268       }
3269       return;
3270     }
3271     case Instruction::ShuffleVector: {
3272       // If this is not an alternate sequence of opcode like add-sub
3273       // then do not vectorize this instruction.
3274       if (!S.isAltShuffle()) {
3275         BS.cancelScheduling(VL, VL0);
3276         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3277                      ReuseShuffleIndicies);
3278         LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
3279         return;
3280       }
3281       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3282                                    ReuseShuffleIndicies);
3283       LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
3284 
3285       // Reorder operands if reordering would enable vectorization.
3286       if (isa<BinaryOperator>(VL0)) {
3287         ValueList Left, Right;
3288         reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
3289         TE->setOperand(0, Left);
3290         TE->setOperand(1, Right);
3291         buildTree_rec(Left, Depth + 1, {TE, 0});
3292         buildTree_rec(Right, Depth + 1, {TE, 1});
3293         return;
3294       }
3295 
3296       TE->setOperandsInOrder();
3297       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
3298         ValueList Operands;
3299         // Prepare the operand vector.
3300         for (Value *V : VL)
3301           Operands.push_back(cast<Instruction>(V)->getOperand(i));
3302 
3303         buildTree_rec(Operands, Depth + 1, {TE, i});
3304       }
3305       return;
3306     }
3307     default:
3308       BS.cancelScheduling(VL, VL0);
3309       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3310                    ReuseShuffleIndicies);
3311       LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
3312       return;
3313   }
3314 }
3315 
3316 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
3317   unsigned N = 1;
3318   Type *EltTy = T;
3319 
3320   while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) ||
3321          isa<VectorType>(EltTy)) {
3322     if (auto *ST = dyn_cast<StructType>(EltTy)) {
3323       // Check that struct is homogeneous.
3324       for (const auto *Ty : ST->elements())
3325         if (Ty != *ST->element_begin())
3326           return 0;
3327       N *= ST->getNumElements();
3328       EltTy = *ST->element_begin();
3329     } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) {
3330       N *= AT->getNumElements();
3331       EltTy = AT->getElementType();
3332     } else {
3333       auto *VT = cast<FixedVectorType>(EltTy);
3334       N *= VT->getNumElements();
3335       EltTy = VT->getElementType();
3336     }
3337   }
3338 
3339   if (!isValidElementType(EltTy))
3340     return 0;
3341   uint64_t VTSize = DL.getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N));
3342   if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T))
3343     return 0;
3344   return N;
3345 }
3346 
3347 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
3348                               SmallVectorImpl<unsigned> &CurrentOrder) const {
3349   Instruction *E0 = cast<Instruction>(OpValue);
3350   assert(E0->getOpcode() == Instruction::ExtractElement ||
3351          E0->getOpcode() == Instruction::ExtractValue);
3352   assert(E0->getOpcode() == getSameOpcode(VL).getOpcode() && "Invalid opcode");
3353   // Check if all of the extracts come from the same vector and from the
3354   // correct offset.
3355   Value *Vec = E0->getOperand(0);
3356 
3357   CurrentOrder.clear();
3358 
3359   // We have to extract from a vector/aggregate with the same number of elements.
3360   unsigned NElts;
3361   if (E0->getOpcode() == Instruction::ExtractValue) {
3362     const DataLayout &DL = E0->getModule()->getDataLayout();
3363     NElts = canMapToVector(Vec->getType(), DL);
3364     if (!NElts)
3365       return false;
3366     // Check if load can be rewritten as load of vector.
3367     LoadInst *LI = dyn_cast<LoadInst>(Vec);
3368     if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
3369       return false;
3370   } else {
3371     NElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
3372   }
3373 
3374   if (NElts != VL.size())
3375     return false;
3376 
3377   // Check that all of the indices extract from the correct offset.
3378   bool ShouldKeepOrder = true;
3379   unsigned E = VL.size();
3380   // Assign to all items the initial value E + 1 so we can check if the extract
3381   // instruction index was used already.
3382   // Also, later we can check that all the indices are used and we have a
3383   // consecutive access in the extract instructions, by checking that no
3384   // element of CurrentOrder still has value E + 1.
3385   CurrentOrder.assign(E, E + 1);
3386   unsigned I = 0;
3387   for (; I < E; ++I) {
3388     auto *Inst = cast<Instruction>(VL[I]);
3389     if (Inst->getOperand(0) != Vec)
3390       break;
3391     Optional<unsigned> Idx = getExtractIndex(Inst);
3392     if (!Idx)
3393       break;
3394     const unsigned ExtIdx = *Idx;
3395     if (ExtIdx != I) {
3396       if (ExtIdx >= E || CurrentOrder[ExtIdx] != E + 1)
3397         break;
3398       ShouldKeepOrder = false;
3399       CurrentOrder[ExtIdx] = I;
3400     } else {
3401       if (CurrentOrder[I] != E + 1)
3402         break;
3403       CurrentOrder[I] = I;
3404     }
3405   }
3406   if (I < E) {
3407     CurrentOrder.clear();
3408     return false;
3409   }
3410 
3411   return ShouldKeepOrder;
3412 }
3413 
3414 bool BoUpSLP::areAllUsersVectorized(Instruction *I) const {
3415   return I->hasOneUse() || llvm::all_of(I->users(), [this](User *U) {
3416            return ScalarToTreeEntry.count(U) > 0;
3417          });
3418 }
3419 
3420 static std::pair<InstructionCost, InstructionCost>
3421 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy,
3422                    TargetTransformInfo *TTI, TargetLibraryInfo *TLI) {
3423   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3424 
3425   // Calculate the cost of the scalar and vector calls.
3426   SmallVector<Type *, 4> VecTys;
3427   for (Use &Arg : CI->args())
3428     VecTys.push_back(
3429         FixedVectorType::get(Arg->getType(), VecTy->getNumElements()));
3430   FastMathFlags FMF;
3431   if (auto *FPCI = dyn_cast<FPMathOperator>(CI))
3432     FMF = FPCI->getFastMathFlags();
3433   SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end());
3434   IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF,
3435                                     dyn_cast<IntrinsicInst>(CI));
3436   auto IntrinsicCost =
3437     TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput);
3438 
3439   auto Shape = VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
3440                                      VecTy->getNumElements())),
3441                             false /*HasGlobalPred*/);
3442   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3443   auto LibCost = IntrinsicCost;
3444   if (!CI->isNoBuiltin() && VecFunc) {
3445     // Calculate the cost of the vector library call.
3446     // If the corresponding vector call is cheaper, return its cost.
3447     LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys,
3448                                     TTI::TCK_RecipThroughput);
3449   }
3450   return {IntrinsicCost, LibCost};
3451 }
3452 
3453 InstructionCost BoUpSLP::getEntryCost(TreeEntry *E) {
3454   ArrayRef<Value*> VL = E->Scalars;
3455 
3456   Type *ScalarTy = VL[0]->getType();
3457   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
3458     ScalarTy = SI->getValueOperand()->getType();
3459   else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0]))
3460     ScalarTy = CI->getOperand(0)->getType();
3461   auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
3462   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
3463 
3464   // If we have computed a smaller type for the expression, update VecTy so
3465   // that the costs will be accurate.
3466   if (MinBWs.count(VL[0]))
3467     VecTy = FixedVectorType::get(
3468         IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size());
3469 
3470   unsigned ReuseShuffleNumbers = E->ReuseShuffleIndices.size();
3471   bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
3472   InstructionCost ReuseShuffleCost = 0;
3473   if (NeedToShuffleReuses) {
3474     ReuseShuffleCost =
3475         TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy,
3476                             E->ReuseShuffleIndices);
3477   }
3478   if (E->State == TreeEntry::NeedToGather) {
3479     if (allConstant(VL))
3480       return 0;
3481     if (isSplat(VL)) {
3482       return ReuseShuffleCost +
3483              TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, None,
3484                                  0);
3485     }
3486     if (E->getOpcode() == Instruction::ExtractElement &&
3487         allSameType(VL) && allSameBlock(VL)) {
3488       SmallVector<int> Mask;
3489       Optional<TargetTransformInfo::ShuffleKind> ShuffleKind =
3490           isShuffle(VL, Mask);
3491       if (ShuffleKind.hasValue()) {
3492         InstructionCost Cost =
3493             TTI->getShuffleCost(ShuffleKind.getValue(), VecTy, Mask);
3494         for (auto *V : VL) {
3495           // If all users of instruction are going to be vectorized and this
3496           // instruction itself is not going to be vectorized, consider this
3497           // instruction as dead and remove its cost from the final cost of the
3498           // vectorized tree.
3499           if (areAllUsersVectorized(cast<Instruction>(V)) &&
3500               !ScalarToTreeEntry.count(V)) {
3501             auto *IO = cast<ConstantInt>(
3502                 cast<ExtractElementInst>(V)->getIndexOperand());
3503             Cost -= TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
3504                                             IO->getZExtValue());
3505           }
3506         }
3507         return ReuseShuffleCost + Cost;
3508       }
3509     }
3510     return ReuseShuffleCost + getGatherCost(VL);
3511   }
3512   assert((E->State == TreeEntry::Vectorize ||
3513           E->State == TreeEntry::ScatterVectorize) &&
3514          "Unhandled state");
3515   assert(E->getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
3516   Instruction *VL0 = E->getMainOp();
3517   unsigned ShuffleOrOp =
3518       E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
3519   switch (ShuffleOrOp) {
3520     case Instruction::PHI:
3521       return 0;
3522 
3523     case Instruction::ExtractValue:
3524     case Instruction::ExtractElement: {
3525       // The common cost of removal ExtractElement/ExtractValue instructions +
3526       // the cost of shuffles, if required to resuffle the original vector.
3527       InstructionCost CommonCost = 0;
3528       if (NeedToShuffleReuses) {
3529         unsigned Idx = 0;
3530         for (unsigned I : E->ReuseShuffleIndices) {
3531           if (ShuffleOrOp == Instruction::ExtractElement) {
3532             auto *IO = cast<ConstantInt>(
3533                 cast<ExtractElementInst>(VL[I])->getIndexOperand());
3534             Idx = IO->getZExtValue();
3535             ReuseShuffleCost -= TTI->getVectorInstrCost(
3536                 Instruction::ExtractElement, VecTy, Idx);
3537           } else {
3538             ReuseShuffleCost -= TTI->getVectorInstrCost(
3539                 Instruction::ExtractElement, VecTy, Idx);
3540             ++Idx;
3541           }
3542         }
3543         Idx = ReuseShuffleNumbers;
3544         for (Value *V : VL) {
3545           if (ShuffleOrOp == Instruction::ExtractElement) {
3546             auto *IO = cast<ConstantInt>(
3547                 cast<ExtractElementInst>(V)->getIndexOperand());
3548             Idx = IO->getZExtValue();
3549           } else {
3550             --Idx;
3551           }
3552           ReuseShuffleCost +=
3553               TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, Idx);
3554         }
3555         CommonCost = ReuseShuffleCost;
3556       } else if (!E->ReorderIndices.empty()) {
3557         SmallVector<int> NewMask;
3558         inversePermutation(E->ReorderIndices, NewMask);
3559         CommonCost = TTI->getShuffleCost(
3560             TargetTransformInfo::SK_PermuteSingleSrc, VecTy, NewMask);
3561       }
3562       for (unsigned I = 0, E = VL.size(); I < E; ++I) {
3563         Instruction *EI = cast<Instruction>(VL[I]);
3564         // If all users are going to be vectorized, instruction can be
3565         // considered as dead.
3566         // The same, if have only one user, it will be vectorized for sure.
3567         if (areAllUsersVectorized(EI)) {
3568           // Take credit for instruction that will become dead.
3569           if (EI->hasOneUse()) {
3570             Instruction *Ext = EI->user_back();
3571             if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
3572                 all_of(Ext->users(),
3573                        [](User *U) { return isa<GetElementPtrInst>(U); })) {
3574               // Use getExtractWithExtendCost() to calculate the cost of
3575               // extractelement/ext pair.
3576               CommonCost -= TTI->getExtractWithExtendCost(
3577                   Ext->getOpcode(), Ext->getType(), VecTy, I);
3578               // Add back the cost of s|zext which is subtracted separately.
3579               CommonCost += TTI->getCastInstrCost(
3580                   Ext->getOpcode(), Ext->getType(), EI->getType(),
3581                   TTI::getCastContextHint(Ext), CostKind, Ext);
3582               continue;
3583             }
3584           }
3585           CommonCost -=
3586               TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, I);
3587         }
3588       }
3589       return CommonCost;
3590     }
3591     case Instruction::ZExt:
3592     case Instruction::SExt:
3593     case Instruction::FPToUI:
3594     case Instruction::FPToSI:
3595     case Instruction::FPExt:
3596     case Instruction::PtrToInt:
3597     case Instruction::IntToPtr:
3598     case Instruction::SIToFP:
3599     case Instruction::UIToFP:
3600     case Instruction::Trunc:
3601     case Instruction::FPTrunc:
3602     case Instruction::BitCast: {
3603       Type *SrcTy = VL0->getOperand(0)->getType();
3604       InstructionCost ScalarEltCost =
3605           TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy,
3606                                 TTI::getCastContextHint(VL0), CostKind, VL0);
3607       if (NeedToShuffleReuses) {
3608         ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3609       }
3610 
3611       // Calculate the cost of this instruction.
3612       InstructionCost ScalarCost = VL.size() * ScalarEltCost;
3613 
3614       auto *SrcVecTy = FixedVectorType::get(SrcTy, VL.size());
3615       InstructionCost VecCost = 0;
3616       // Check if the values are candidates to demote.
3617       if (!MinBWs.count(VL0) || VecTy != SrcVecTy) {
3618         VecCost =
3619             ReuseShuffleCost +
3620             TTI->getCastInstrCost(E->getOpcode(), VecTy, SrcVecTy,
3621                                   TTI::getCastContextHint(VL0), CostKind, VL0);
3622       }
3623       LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecCost, ScalarCost));
3624       return VecCost - ScalarCost;
3625     }
3626     case Instruction::FCmp:
3627     case Instruction::ICmp:
3628     case Instruction::Select: {
3629       // Calculate the cost of this instruction.
3630       InstructionCost ScalarEltCost =
3631           TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, Builder.getInt1Ty(),
3632                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, VL0);
3633       if (NeedToShuffleReuses) {
3634         ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3635       }
3636       auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size());
3637       InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
3638 
3639       // Check if all entries in VL are either compares or selects with compares
3640       // as condition that have the same predicates.
3641       CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE;
3642       bool First = true;
3643       for (auto *V : VL) {
3644         CmpInst::Predicate CurrentPred;
3645         auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value());
3646         if ((!match(V, m_Select(MatchCmp, m_Value(), m_Value())) &&
3647              !match(V, MatchCmp)) ||
3648             (!First && VecPred != CurrentPred)) {
3649           VecPred = CmpInst::BAD_ICMP_PREDICATE;
3650           break;
3651         }
3652         First = false;
3653         VecPred = CurrentPred;
3654       }
3655 
3656       InstructionCost VecCost = TTI->getCmpSelInstrCost(
3657           E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0);
3658       // Check if it is possible and profitable to use min/max for selects in
3659       // VL.
3660       //
3661       auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL);
3662       if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) {
3663         IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy,
3664                                           {VecTy, VecTy});
3665         InstructionCost IntrinsicCost =
3666             TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
3667         // If the selects are the only uses of the compares, they will be dead
3668         // and we can adjust the cost by removing their cost.
3669         if (IntrinsicAndUse.second)
3670           IntrinsicCost -=
3671               TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy, MaskTy,
3672                                       CmpInst::BAD_ICMP_PREDICATE, CostKind);
3673         VecCost = std::min(VecCost, IntrinsicCost);
3674       }
3675       LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecCost, ScalarCost));
3676       return ReuseShuffleCost + VecCost - ScalarCost;
3677     }
3678     case Instruction::FNeg:
3679     case Instruction::Add:
3680     case Instruction::FAdd:
3681     case Instruction::Sub:
3682     case Instruction::FSub:
3683     case Instruction::Mul:
3684     case Instruction::FMul:
3685     case Instruction::UDiv:
3686     case Instruction::SDiv:
3687     case Instruction::FDiv:
3688     case Instruction::URem:
3689     case Instruction::SRem:
3690     case Instruction::FRem:
3691     case Instruction::Shl:
3692     case Instruction::LShr:
3693     case Instruction::AShr:
3694     case Instruction::And:
3695     case Instruction::Or:
3696     case Instruction::Xor: {
3697       // Certain instructions can be cheaper to vectorize if they have a
3698       // constant second vector operand.
3699       TargetTransformInfo::OperandValueKind Op1VK =
3700           TargetTransformInfo::OK_AnyValue;
3701       TargetTransformInfo::OperandValueKind Op2VK =
3702           TargetTransformInfo::OK_UniformConstantValue;
3703       TargetTransformInfo::OperandValueProperties Op1VP =
3704           TargetTransformInfo::OP_None;
3705       TargetTransformInfo::OperandValueProperties Op2VP =
3706           TargetTransformInfo::OP_PowerOf2;
3707 
3708       // If all operands are exactly the same ConstantInt then set the
3709       // operand kind to OK_UniformConstantValue.
3710       // If instead not all operands are constants, then set the operand kind
3711       // to OK_AnyValue. If all operands are constants but not the same,
3712       // then set the operand kind to OK_NonUniformConstantValue.
3713       ConstantInt *CInt0 = nullptr;
3714       for (unsigned i = 0, e = VL.size(); i < e; ++i) {
3715         const Instruction *I = cast<Instruction>(VL[i]);
3716         unsigned OpIdx = isa<BinaryOperator>(I) ? 1 : 0;
3717         ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx));
3718         if (!CInt) {
3719           Op2VK = TargetTransformInfo::OK_AnyValue;
3720           Op2VP = TargetTransformInfo::OP_None;
3721           break;
3722         }
3723         if (Op2VP == TargetTransformInfo::OP_PowerOf2 &&
3724             !CInt->getValue().isPowerOf2())
3725           Op2VP = TargetTransformInfo::OP_None;
3726         if (i == 0) {
3727           CInt0 = CInt;
3728           continue;
3729         }
3730         if (CInt0 != CInt)
3731           Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
3732       }
3733 
3734       SmallVector<const Value *, 4> Operands(VL0->operand_values());
3735       InstructionCost ScalarEltCost =
3736           TTI->getArithmeticInstrCost(E->getOpcode(), ScalarTy, CostKind, Op1VK,
3737                                       Op2VK, Op1VP, Op2VP, Operands, VL0);
3738       if (NeedToShuffleReuses) {
3739         ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3740       }
3741       InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
3742       InstructionCost VecCost =
3743           TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind, Op1VK,
3744                                       Op2VK, Op1VP, Op2VP, Operands, VL0);
3745       LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecCost, ScalarCost));
3746       return ReuseShuffleCost + VecCost - ScalarCost;
3747     }
3748     case Instruction::GetElementPtr: {
3749       TargetTransformInfo::OperandValueKind Op1VK =
3750           TargetTransformInfo::OK_AnyValue;
3751       TargetTransformInfo::OperandValueKind Op2VK =
3752           TargetTransformInfo::OK_UniformConstantValue;
3753 
3754       InstructionCost ScalarEltCost = TTI->getArithmeticInstrCost(
3755           Instruction::Add, ScalarTy, CostKind, Op1VK, Op2VK);
3756       if (NeedToShuffleReuses) {
3757         ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3758       }
3759       InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
3760       InstructionCost VecCost = TTI->getArithmeticInstrCost(
3761           Instruction::Add, VecTy, CostKind, Op1VK, Op2VK);
3762       LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecCost, ScalarCost));
3763       return ReuseShuffleCost + VecCost - ScalarCost;
3764     }
3765     case Instruction::Load: {
3766       // Cost of wide load - cost of scalar loads.
3767       Align alignment = cast<LoadInst>(VL0)->getAlign();
3768       InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
3769           Instruction::Load, ScalarTy, alignment, 0, CostKind, VL0);
3770       if (NeedToShuffleReuses) {
3771         ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3772       }
3773       InstructionCost ScalarLdCost = VecTy->getNumElements() * ScalarEltCost;
3774       InstructionCost VecLdCost;
3775       if (E->State == TreeEntry::Vectorize) {
3776         VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, alignment, 0,
3777                                          CostKind, VL0);
3778       } else {
3779         assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState");
3780         VecLdCost = TTI->getGatherScatterOpCost(
3781             Instruction::Load, VecTy, cast<LoadInst>(VL0)->getPointerOperand(),
3782             /*VariableMask=*/false, alignment, CostKind, VL0);
3783       }
3784       if (!NeedToShuffleReuses && !E->ReorderIndices.empty()) {
3785         SmallVector<int> NewMask;
3786         inversePermutation(E->ReorderIndices, NewMask);
3787         VecLdCost += TTI->getShuffleCost(
3788             TargetTransformInfo::SK_PermuteSingleSrc, VecTy, NewMask);
3789       }
3790       LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecLdCost, ScalarLdCost));
3791       return ReuseShuffleCost + VecLdCost - ScalarLdCost;
3792     }
3793     case Instruction::Store: {
3794       // We know that we can merge the stores. Calculate the cost.
3795       bool IsReorder = !E->ReorderIndices.empty();
3796       auto *SI =
3797           cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0);
3798       Align Alignment = SI->getAlign();
3799       InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
3800           Instruction::Store, ScalarTy, Alignment, 0, CostKind, VL0);
3801       InstructionCost ScalarStCost = VecTy->getNumElements() * ScalarEltCost;
3802       InstructionCost VecStCost = TTI->getMemoryOpCost(
3803           Instruction::Store, VecTy, Alignment, 0, CostKind, VL0);
3804       if (IsReorder) {
3805         SmallVector<int> NewMask;
3806         inversePermutation(E->ReorderIndices, NewMask);
3807         VecStCost += TTI->getShuffleCost(
3808             TargetTransformInfo::SK_PermuteSingleSrc, VecTy, NewMask);
3809       }
3810       LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecStCost, ScalarStCost));
3811       return VecStCost - ScalarStCost;
3812     }
3813     case Instruction::Call: {
3814       CallInst *CI = cast<CallInst>(VL0);
3815       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3816 
3817       // Calculate the cost of the scalar and vector calls.
3818       IntrinsicCostAttributes CostAttrs(ID, *CI, 1);
3819       InstructionCost ScalarEltCost =
3820           TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
3821       if (NeedToShuffleReuses) {
3822         ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3823       }
3824       InstructionCost ScalarCallCost = VecTy->getNumElements() * ScalarEltCost;
3825 
3826       auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
3827       InstructionCost VecCallCost =
3828           std::min(VecCallCosts.first, VecCallCosts.second);
3829 
3830       LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
3831                         << " (" << VecCallCost << "-" << ScalarCallCost << ")"
3832                         << " for " << *CI << "\n");
3833 
3834       return ReuseShuffleCost + VecCallCost - ScalarCallCost;
3835     }
3836     case Instruction::ShuffleVector: {
3837       assert(E->isAltShuffle() &&
3838              ((Instruction::isBinaryOp(E->getOpcode()) &&
3839                Instruction::isBinaryOp(E->getAltOpcode())) ||
3840               (Instruction::isCast(E->getOpcode()) &&
3841                Instruction::isCast(E->getAltOpcode()))) &&
3842              "Invalid Shuffle Vector Operand");
3843       InstructionCost ScalarCost = 0;
3844       if (NeedToShuffleReuses) {
3845         for (unsigned Idx : E->ReuseShuffleIndices) {
3846           Instruction *I = cast<Instruction>(VL[Idx]);
3847           ReuseShuffleCost -= TTI->getInstructionCost(I, CostKind);
3848         }
3849         for (Value *V : VL) {
3850           Instruction *I = cast<Instruction>(V);
3851           ReuseShuffleCost += TTI->getInstructionCost(I, CostKind);
3852         }
3853       }
3854       for (Value *V : VL) {
3855         Instruction *I = cast<Instruction>(V);
3856         assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
3857         ScalarCost += TTI->getInstructionCost(I, CostKind);
3858       }
3859       // VecCost is equal to sum of the cost of creating 2 vectors
3860       // and the cost of creating shuffle.
3861       InstructionCost VecCost = 0;
3862       if (Instruction::isBinaryOp(E->getOpcode())) {
3863         VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind);
3864         VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy,
3865                                                CostKind);
3866       } else {
3867         Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType();
3868         Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType();
3869         auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size());
3870         auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size());
3871         VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty,
3872                                         TTI::CastContextHint::None, CostKind);
3873         VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty,
3874                                          TTI::CastContextHint::None, CostKind);
3875       }
3876 
3877       SmallVector<int> Mask(E->Scalars.size());
3878       for (unsigned I = 0, End = E->Scalars.size(); I < End; ++I) {
3879         auto *OpInst = cast<Instruction>(E->Scalars[I]);
3880         assert(E->isOpcodeOrAlt(OpInst) && "Unexpected main/alternate opcode");
3881         Mask[I] = I + (OpInst->getOpcode() == E->getAltOpcode() ? End : 0);
3882       }
3883       VecCost +=
3884           TTI->getShuffleCost(TargetTransformInfo::SK_Select, VecTy, Mask, 0);
3885       LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecCost, ScalarCost));
3886       return ReuseShuffleCost + VecCost - ScalarCost;
3887     }
3888     default:
3889       llvm_unreachable("Unknown instruction");
3890   }
3891 }
3892 
3893 bool BoUpSLP::isFullyVectorizableTinyTree() const {
3894   LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
3895                     << VectorizableTree.size() << " is fully vectorizable .\n");
3896 
3897   // We only handle trees of heights 1 and 2.
3898   if (VectorizableTree.size() == 1 &&
3899       VectorizableTree[0]->State == TreeEntry::Vectorize)
3900     return true;
3901 
3902   if (VectorizableTree.size() != 2)
3903     return false;
3904 
3905   // Handle splat and all-constants stores.
3906   if (VectorizableTree[0]->State == TreeEntry::Vectorize &&
3907       (allConstant(VectorizableTree[1]->Scalars) ||
3908        isSplat(VectorizableTree[1]->Scalars)))
3909     return true;
3910 
3911   // Gathering cost would be too much for tiny trees.
3912   if (VectorizableTree[0]->State == TreeEntry::NeedToGather ||
3913       VectorizableTree[1]->State == TreeEntry::NeedToGather)
3914     return false;
3915 
3916   return true;
3917 }
3918 
3919 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts,
3920                                        TargetTransformInfo *TTI) {
3921   // Look past the root to find a source value. Arbitrarily follow the
3922   // path through operand 0 of any 'or'. Also, peek through optional
3923   // shift-left-by-multiple-of-8-bits.
3924   Value *ZextLoad = Root;
3925   const APInt *ShAmtC;
3926   while (!isa<ConstantExpr>(ZextLoad) &&
3927          (match(ZextLoad, m_Or(m_Value(), m_Value())) ||
3928           (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) &&
3929            ShAmtC->urem(8) == 0)))
3930     ZextLoad = cast<BinaryOperator>(ZextLoad)->getOperand(0);
3931 
3932   // Check if the input is an extended load of the required or/shift expression.
3933   Value *LoadPtr;
3934   if (ZextLoad == Root || !match(ZextLoad, m_ZExt(m_Load(m_Value(LoadPtr)))))
3935     return false;
3936 
3937   // Require that the total load bit width is a legal integer type.
3938   // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target.
3939   // But <16 x i8> --> i128 is not, so the backend probably can't reduce it.
3940   Type *SrcTy = LoadPtr->getType()->getPointerElementType();
3941   unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts;
3942   if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth)))
3943     return false;
3944 
3945   // Everything matched - assume that we can fold the whole sequence using
3946   // load combining.
3947   LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at "
3948              << *(cast<Instruction>(Root)) << "\n");
3949 
3950   return true;
3951 }
3952 
3953 bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const {
3954   if (RdxKind != RecurKind::Or)
3955     return false;
3956 
3957   unsigned NumElts = VectorizableTree[0]->Scalars.size();
3958   Value *FirstReduced = VectorizableTree[0]->Scalars[0];
3959   return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI);
3960 }
3961 
3962 bool BoUpSLP::isLoadCombineCandidate() const {
3963   // Peek through a final sequence of stores and check if all operations are
3964   // likely to be load-combined.
3965   unsigned NumElts = VectorizableTree[0]->Scalars.size();
3966   for (Value *Scalar : VectorizableTree[0]->Scalars) {
3967     Value *X;
3968     if (!match(Scalar, m_Store(m_Value(X), m_Value())) ||
3969         !isLoadCombineCandidateImpl(X, NumElts, TTI))
3970       return false;
3971   }
3972   return true;
3973 }
3974 
3975 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() const {
3976   // We can vectorize the tree if its size is greater than or equal to the
3977   // minimum size specified by the MinTreeSize command line option.
3978   if (VectorizableTree.size() >= MinTreeSize)
3979     return false;
3980 
3981   // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
3982   // can vectorize it if we can prove it fully vectorizable.
3983   if (isFullyVectorizableTinyTree())
3984     return false;
3985 
3986   assert(VectorizableTree.empty()
3987              ? ExternalUses.empty()
3988              : true && "We shouldn't have any external users");
3989 
3990   // Otherwise, we can't vectorize the tree. It is both tiny and not fully
3991   // vectorizable.
3992   return true;
3993 }
3994 
3995 InstructionCost BoUpSLP::getSpillCost() const {
3996   // Walk from the bottom of the tree to the top, tracking which values are
3997   // live. When we see a call instruction that is not part of our tree,
3998   // query TTI to see if there is a cost to keeping values live over it
3999   // (for example, if spills and fills are required).
4000   unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
4001   InstructionCost Cost = 0;
4002 
4003   SmallPtrSet<Instruction*, 4> LiveValues;
4004   Instruction *PrevInst = nullptr;
4005 
4006   // The entries in VectorizableTree are not necessarily ordered by their
4007   // position in basic blocks. Collect them and order them by dominance so later
4008   // instructions are guaranteed to be visited first. For instructions in
4009   // different basic blocks, we only scan to the beginning of the block, so
4010   // their order does not matter, as long as all instructions in a basic block
4011   // are grouped together. Using dominance ensures a deterministic order.
4012   SmallVector<Instruction *, 16> OrderedScalars;
4013   for (const auto &TEPtr : VectorizableTree) {
4014     Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]);
4015     if (!Inst)
4016       continue;
4017     OrderedScalars.push_back(Inst);
4018   }
4019   llvm::stable_sort(OrderedScalars, [this](Instruction *A, Instruction *B) {
4020     return DT->dominates(B, A);
4021   });
4022 
4023   for (Instruction *Inst : OrderedScalars) {
4024     if (!PrevInst) {
4025       PrevInst = Inst;
4026       continue;
4027     }
4028 
4029     // Update LiveValues.
4030     LiveValues.erase(PrevInst);
4031     for (auto &J : PrevInst->operands()) {
4032       if (isa<Instruction>(&*J) && getTreeEntry(&*J))
4033         LiveValues.insert(cast<Instruction>(&*J));
4034     }
4035 
4036     LLVM_DEBUG({
4037       dbgs() << "SLP: #LV: " << LiveValues.size();
4038       for (auto *X : LiveValues)
4039         dbgs() << " " << X->getName();
4040       dbgs() << ", Looking at ";
4041       Inst->dump();
4042     });
4043 
4044     // Now find the sequence of instructions between PrevInst and Inst.
4045     unsigned NumCalls = 0;
4046     BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
4047                                  PrevInstIt =
4048                                      PrevInst->getIterator().getReverse();
4049     while (InstIt != PrevInstIt) {
4050       if (PrevInstIt == PrevInst->getParent()->rend()) {
4051         PrevInstIt = Inst->getParent()->rbegin();
4052         continue;
4053       }
4054 
4055       // Debug information does not impact spill cost.
4056       if ((isa<CallInst>(&*PrevInstIt) &&
4057            !isa<DbgInfoIntrinsic>(&*PrevInstIt)) &&
4058           &*PrevInstIt != PrevInst)
4059         NumCalls++;
4060 
4061       ++PrevInstIt;
4062     }
4063 
4064     if (NumCalls) {
4065       SmallVector<Type*, 4> V;
4066       for (auto *II : LiveValues)
4067         V.push_back(FixedVectorType::get(II->getType(), BundleWidth));
4068       Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
4069     }
4070 
4071     PrevInst = Inst;
4072   }
4073 
4074   return Cost;
4075 }
4076 
4077 InstructionCost BoUpSLP::getTreeCost() {
4078   InstructionCost Cost = 0;
4079   LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
4080                     << VectorizableTree.size() << ".\n");
4081 
4082   unsigned BundleWidth = VectorizableTree[0]->Scalars.size();
4083 
4084   for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
4085     TreeEntry &TE = *VectorizableTree[I].get();
4086 
4087     // We create duplicate tree entries for gather sequences that have multiple
4088     // uses. However, we should not compute the cost of duplicate sequences.
4089     // For example, if we have a build vector (i.e., insertelement sequence)
4090     // that is used by more than one vector instruction, we only need to
4091     // compute the cost of the insertelement instructions once. The redundant
4092     // instructions will be eliminated by CSE.
4093     //
4094     // We should consider not creating duplicate tree entries for gather
4095     // sequences, and instead add additional edges to the tree representing
4096     // their uses. Since such an approach results in fewer total entries,
4097     // existing heuristics based on tree size may yield different results.
4098     //
4099     if (TE.State == TreeEntry::NeedToGather &&
4100         std::any_of(std::next(VectorizableTree.begin(), I + 1),
4101                     VectorizableTree.end(),
4102                     [TE](const std::unique_ptr<TreeEntry> &EntryPtr) {
4103                       return EntryPtr->State == TreeEntry::NeedToGather &&
4104                              EntryPtr->isSame(TE.Scalars);
4105                     }))
4106       continue;
4107 
4108     InstructionCost C = getEntryCost(&TE);
4109     Cost += C;
4110     LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
4111                       << " for bundle that starts with " << *TE.Scalars[0]
4112                       << ".\n"
4113                       << "SLP: Current total cost = " << Cost << "\n");
4114   }
4115 
4116   SmallPtrSet<Value *, 16> ExtractCostCalculated;
4117   InstructionCost ExtractCost = 0;
4118   for (ExternalUser &EU : ExternalUses) {
4119     // We only add extract cost once for the same scalar.
4120     if (!ExtractCostCalculated.insert(EU.Scalar).second)
4121       continue;
4122 
4123     // Uses by ephemeral values are free (because the ephemeral value will be
4124     // removed prior to code generation, and so the extraction will be
4125     // removed as well).
4126     if (EphValues.count(EU.User))
4127       continue;
4128 
4129     // If we plan to rewrite the tree in a smaller type, we will need to sign
4130     // extend the extracted value back to the original type. Here, we account
4131     // for the extract and the added cost of the sign extend if needed.
4132     auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth);
4133     auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
4134     if (MinBWs.count(ScalarRoot)) {
4135       auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
4136       auto Extend =
4137           MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
4138       VecTy = FixedVectorType::get(MinTy, BundleWidth);
4139       ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
4140                                                    VecTy, EU.Lane);
4141     } else {
4142       ExtractCost +=
4143           TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
4144     }
4145   }
4146 
4147   InstructionCost SpillCost = getSpillCost();
4148   Cost += SpillCost + ExtractCost;
4149 
4150 #ifndef NDEBUG
4151   SmallString<256> Str;
4152   {
4153     raw_svector_ostream OS(Str);
4154     OS << "SLP: Spill Cost = " << SpillCost << ".\n"
4155        << "SLP: Extract Cost = " << ExtractCost << ".\n"
4156        << "SLP: Total Cost = " << Cost << ".\n";
4157   }
4158   LLVM_DEBUG(dbgs() << Str);
4159   if (ViewSLPTree)
4160     ViewGraph(this, "SLP" + F->getName(), false, Str);
4161 #endif
4162 
4163   return Cost;
4164 }
4165 
4166 InstructionCost
4167 BoUpSLP::getGatherCost(FixedVectorType *Ty,
4168                        const DenseSet<unsigned> &ShuffledIndices) const {
4169   unsigned NumElts = Ty->getNumElements();
4170   APInt DemandedElts = APInt::getNullValue(NumElts);
4171   for (unsigned I = 0; I < NumElts; ++I)
4172     if (!ShuffledIndices.count(I))
4173       DemandedElts.setBit(I);
4174   InstructionCost Cost =
4175       TTI->getScalarizationOverhead(Ty, DemandedElts, /*Insert*/ true,
4176                                     /*Extract*/ false);
4177   if (!ShuffledIndices.empty())
4178     Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty);
4179   return Cost;
4180 }
4181 
4182 InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const {
4183   // Find the type of the operands in VL.
4184   Type *ScalarTy = VL[0]->getType();
4185   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
4186     ScalarTy = SI->getValueOperand()->getType();
4187   auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
4188   // Find the cost of inserting/extracting values from the vector.
4189   // Check if the same elements are inserted several times and count them as
4190   // shuffle candidates.
4191   DenseSet<unsigned> ShuffledElements;
4192   DenseSet<Value *> UniqueElements;
4193   // Iterate in reverse order to consider insert elements with the high cost.
4194   for (unsigned I = VL.size(); I > 0; --I) {
4195     unsigned Idx = I - 1;
4196     if (!UniqueElements.insert(VL[Idx]).second)
4197       ShuffledElements.insert(Idx);
4198   }
4199   return getGatherCost(VecTy, ShuffledElements);
4200 }
4201 
4202 // Perform operand reordering on the instructions in VL and return the reordered
4203 // operands in Left and Right.
4204 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
4205                                              SmallVectorImpl<Value *> &Left,
4206                                              SmallVectorImpl<Value *> &Right,
4207                                              const DataLayout &DL,
4208                                              ScalarEvolution &SE,
4209                                              const BoUpSLP &R) {
4210   if (VL.empty())
4211     return;
4212   VLOperands Ops(VL, DL, SE, R);
4213   // Reorder the operands in place.
4214   Ops.reorder();
4215   Left = Ops.getVL(0);
4216   Right = Ops.getVL(1);
4217 }
4218 
4219 void BoUpSLP::setInsertPointAfterBundle(TreeEntry *E) {
4220   // Get the basic block this bundle is in. All instructions in the bundle
4221   // should be in this block.
4222   auto *Front = E->getMainOp();
4223   auto *BB = Front->getParent();
4224   assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool {
4225     auto *I = cast<Instruction>(V);
4226     return !E->isOpcodeOrAlt(I) || I->getParent() == BB;
4227   }));
4228 
4229   // The last instruction in the bundle in program order.
4230   Instruction *LastInst = nullptr;
4231 
4232   // Find the last instruction. The common case should be that BB has been
4233   // scheduled, and the last instruction is VL.back(). So we start with
4234   // VL.back() and iterate over schedule data until we reach the end of the
4235   // bundle. The end of the bundle is marked by null ScheduleData.
4236   if (BlocksSchedules.count(BB)) {
4237     auto *Bundle =
4238         BlocksSchedules[BB]->getScheduleData(E->isOneOf(E->Scalars.back()));
4239     if (Bundle && Bundle->isPartOfBundle())
4240       for (; Bundle; Bundle = Bundle->NextInBundle)
4241         if (Bundle->OpValue == Bundle->Inst)
4242           LastInst = Bundle->Inst;
4243   }
4244 
4245   // LastInst can still be null at this point if there's either not an entry
4246   // for BB in BlocksSchedules or there's no ScheduleData available for
4247   // VL.back(). This can be the case if buildTree_rec aborts for various
4248   // reasons (e.g., the maximum recursion depth is reached, the maximum region
4249   // size is reached, etc.). ScheduleData is initialized in the scheduling
4250   // "dry-run".
4251   //
4252   // If this happens, we can still find the last instruction by brute force. We
4253   // iterate forwards from Front (inclusive) until we either see all
4254   // instructions in the bundle or reach the end of the block. If Front is the
4255   // last instruction in program order, LastInst will be set to Front, and we
4256   // will visit all the remaining instructions in the block.
4257   //
4258   // One of the reasons we exit early from buildTree_rec is to place an upper
4259   // bound on compile-time. Thus, taking an additional compile-time hit here is
4260   // not ideal. However, this should be exceedingly rare since it requires that
4261   // we both exit early from buildTree_rec and that the bundle be out-of-order
4262   // (causing us to iterate all the way to the end of the block).
4263   if (!LastInst) {
4264     SmallPtrSet<Value *, 16> Bundle(E->Scalars.begin(), E->Scalars.end());
4265     for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
4266       if (Bundle.erase(&I) && E->isOpcodeOrAlt(&I))
4267         LastInst = &I;
4268       if (Bundle.empty())
4269         break;
4270     }
4271   }
4272   assert(LastInst && "Failed to find last instruction in bundle");
4273 
4274   // Set the insertion point after the last instruction in the bundle. Set the
4275   // debug location to Front.
4276   Builder.SetInsertPoint(BB, ++LastInst->getIterator());
4277   Builder.SetCurrentDebugLocation(Front->getDebugLoc());
4278 }
4279 
4280 Value *BoUpSLP::gather(ArrayRef<Value *> VL) {
4281   Value *Val0 =
4282       isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0];
4283   FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size());
4284   Value *Vec = PoisonValue::get(VecTy);
4285   unsigned InsIndex = 0;
4286   for (Value *Val : VL) {
4287     Vec = Builder.CreateInsertElement(Vec, Val, Builder.getInt32(InsIndex++));
4288     auto *InsElt = dyn_cast<InsertElementInst>(Vec);
4289     if (!InsElt)
4290       continue;
4291     GatherSeq.insert(InsElt);
4292     CSEBlocks.insert(InsElt->getParent());
4293     // Add to our 'need-to-extract' list.
4294     if (TreeEntry *Entry = getTreeEntry(Val)) {
4295       // Find which lane we need to extract.
4296       unsigned FoundLane = std::distance(Entry->Scalars.begin(),
4297                                          find(Entry->Scalars, Val));
4298       assert(FoundLane < Entry->Scalars.size() && "Couldn't find extract lane");
4299       if (!Entry->ReuseShuffleIndices.empty()) {
4300         FoundLane = std::distance(Entry->ReuseShuffleIndices.begin(),
4301                                   find(Entry->ReuseShuffleIndices, FoundLane));
4302       }
4303       ExternalUses.push_back(ExternalUser(Val, InsElt, FoundLane));
4304     }
4305   }
4306 
4307   return Vec;
4308 }
4309 
4310 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
4311   InstructionsState S = getSameOpcode(VL);
4312   if (S.getOpcode()) {
4313     if (TreeEntry *E = getTreeEntry(S.OpValue)) {
4314       if (E->isSame(VL)) {
4315         Value *V = vectorizeTree(E);
4316         if (VL.size() == E->Scalars.size() && !E->ReuseShuffleIndices.empty()) {
4317           // Reshuffle to get only unique values.
4318           // If some of the scalars are duplicated in the vectorization tree
4319           // entry, we do not vectorize them but instead generate a mask for the
4320           // reuses. But if there are several users of the same entry, they may
4321           // have different vectorization factors. This is especially important
4322           // for PHI nodes. In this case, we need to adapt the resulting
4323           // instruction for the user vectorization factor and have to reshuffle
4324           // it again to take only unique elements of the vector. Without this
4325           // code the function incorrectly returns reduced vector instruction
4326           // with the same elements, not with the unique ones.
4327           // block:
4328           // %phi = phi <2 x > { .., %entry} {%shuffle, %block}
4329           // %2 = shuffle <2 x > %phi, %poison, <4 x > <0, 0, 1, 1>
4330           // ... (use %2)
4331           // %shuffle = shuffle <2 x> %2, poison, <2 x> {0, 2}
4332           // br %block
4333           SmallVector<int, 4> UniqueIdxs;
4334           SmallSet<int, 4> UsedIdxs;
4335           int Pos = 0;
4336           for (int Idx : E->ReuseShuffleIndices) {
4337             if (UsedIdxs.insert(Idx).second)
4338               UniqueIdxs.emplace_back(Pos);
4339             ++Pos;
4340           }
4341           V = Builder.CreateShuffleVector(V, UniqueIdxs, "shrink.shuffle");
4342         }
4343         return V;
4344       }
4345     }
4346   }
4347 
4348   // Check that every instruction appears once in this bundle.
4349   SmallVector<int, 4> ReuseShuffleIndicies;
4350   SmallVector<Value *, 4> UniqueValues;
4351   if (VL.size() > 2) {
4352     DenseMap<Value *, unsigned> UniquePositions;
4353     for (Value *V : VL) {
4354       auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
4355       ReuseShuffleIndicies.emplace_back(Res.first->second);
4356       if (Res.second || isa<Constant>(V))
4357         UniqueValues.emplace_back(V);
4358     }
4359     // Do not shuffle single element or if number of unique values is not power
4360     // of 2.
4361     if (UniqueValues.size() == VL.size() || UniqueValues.size() <= 1 ||
4362         !llvm::isPowerOf2_32(UniqueValues.size()))
4363       ReuseShuffleIndicies.clear();
4364     else
4365       VL = UniqueValues;
4366   }
4367 
4368   Value *Vec = gather(VL);
4369   if (!ReuseShuffleIndicies.empty()) {
4370     Vec = Builder.CreateShuffleVector(Vec, ReuseShuffleIndicies, "shuffle");
4371     if (auto *I = dyn_cast<Instruction>(Vec)) {
4372       GatherSeq.insert(I);
4373       CSEBlocks.insert(I->getParent());
4374     }
4375   }
4376   return Vec;
4377 }
4378 
4379 namespace {
4380 /// Merges shuffle masks and emits final shuffle instruction, if required.
4381 class ShuffleInstructionBuilder {
4382   IRBuilderBase &Builder;
4383   bool IsFinalized = false;
4384   SmallVector<int, 4> Mask;
4385 
4386 public:
4387   ShuffleInstructionBuilder(IRBuilderBase &Builder) : Builder(Builder) {}
4388 
4389   /// Adds a mask, inverting it before applying.
4390   void addInversedMask(ArrayRef<unsigned> SubMask) {
4391     if (SubMask.empty())
4392       return;
4393     SmallVector<int, 4> NewMask;
4394     inversePermutation(SubMask, NewMask);
4395     addMask(NewMask);
4396   }
4397 
4398   /// Functions adds masks, merging them into  single one.
4399   void addMask(ArrayRef<unsigned> SubMask) {
4400     SmallVector<int, 4> NewMask(SubMask.begin(), SubMask.end());
4401     addMask(NewMask);
4402   }
4403 
4404   void addMask(ArrayRef<int> SubMask) {
4405     if (SubMask.empty())
4406       return;
4407     if (Mask.empty()) {
4408       Mask.append(SubMask.begin(), SubMask.end());
4409       return;
4410     }
4411     SmallVector<int, 4> NewMask(SubMask.size(), SubMask.size());
4412     int TermValue = std::min(Mask.size(), SubMask.size());
4413     for (int I = 0, E = SubMask.size(); I < E; ++I) {
4414       if (SubMask[I] >= TermValue || Mask[SubMask[I]] >= TermValue) {
4415         NewMask[I] = E;
4416         continue;
4417       }
4418       NewMask[I] = Mask[SubMask[I]];
4419     }
4420     Mask.swap(NewMask);
4421   }
4422 
4423   Value *finalize(Value *V) {
4424     IsFinalized = true;
4425     if (Mask.empty())
4426       return V;
4427     return Builder.CreateShuffleVector(V, Mask, "shuffle");
4428   }
4429 
4430   ~ShuffleInstructionBuilder() {
4431     assert((IsFinalized || Mask.empty()) &&
4432            "Shuffle construction must be finalized.");
4433   }
4434 };
4435 } // namespace
4436 
4437 Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
4438   IRBuilder<>::InsertPointGuard Guard(Builder);
4439 
4440   if (E->VectorizedValue) {
4441     LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
4442     return E->VectorizedValue;
4443   }
4444 
4445   ShuffleInstructionBuilder ShuffleBuilder(Builder);
4446   bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
4447   if (E->State == TreeEntry::NeedToGather) {
4448     setInsertPointAfterBundle(E);
4449     Value *Vec = gather(E->Scalars);
4450     if (NeedToShuffleReuses) {
4451       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4452       Vec = ShuffleBuilder.finalize(Vec);
4453       if (auto *I = dyn_cast<Instruction>(Vec)) {
4454         GatherSeq.insert(I);
4455         CSEBlocks.insert(I->getParent());
4456       }
4457     }
4458     E->VectorizedValue = Vec;
4459     return Vec;
4460   }
4461 
4462   assert((E->State == TreeEntry::Vectorize ||
4463           E->State == TreeEntry::ScatterVectorize) &&
4464          "Unhandled state");
4465   unsigned ShuffleOrOp =
4466       E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
4467   Instruction *VL0 = E->getMainOp();
4468   Type *ScalarTy = VL0->getType();
4469   if (auto *Store = dyn_cast<StoreInst>(VL0))
4470     ScalarTy = Store->getValueOperand()->getType();
4471   auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size());
4472   switch (ShuffleOrOp) {
4473     case Instruction::PHI: {
4474       auto *PH = cast<PHINode>(VL0);
4475       Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
4476       Builder.SetCurrentDebugLocation(PH->getDebugLoc());
4477       PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
4478       Value *V = NewPhi;
4479       if (NeedToShuffleReuses)
4480         V = Builder.CreateShuffleVector(V, E->ReuseShuffleIndices, "shuffle");
4481 
4482       E->VectorizedValue = V;
4483 
4484       // PHINodes may have multiple entries from the same block. We want to
4485       // visit every block once.
4486       SmallPtrSet<BasicBlock*, 4> VisitedBBs;
4487 
4488       for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
4489         ValueList Operands;
4490         BasicBlock *IBB = PH->getIncomingBlock(i);
4491 
4492         if (!VisitedBBs.insert(IBB).second) {
4493           NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
4494           continue;
4495         }
4496 
4497         Builder.SetInsertPoint(IBB->getTerminator());
4498         Builder.SetCurrentDebugLocation(PH->getDebugLoc());
4499         Value *Vec = vectorizeTree(E->getOperand(i));
4500         NewPhi->addIncoming(Vec, IBB);
4501       }
4502 
4503       assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
4504              "Invalid number of incoming values");
4505       return V;
4506     }
4507 
4508     case Instruction::ExtractElement: {
4509       Value *V = E->getSingleOperand(0);
4510       Builder.SetInsertPoint(VL0);
4511       ShuffleBuilder.addInversedMask(E->ReorderIndices);
4512       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4513       V = ShuffleBuilder.finalize(V);
4514       E->VectorizedValue = V;
4515       return V;
4516     }
4517     case Instruction::ExtractValue: {
4518       auto *LI = cast<LoadInst>(E->getSingleOperand(0));
4519       Builder.SetInsertPoint(LI);
4520       auto *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
4521       Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
4522       LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign());
4523       Value *NewV = propagateMetadata(V, E->Scalars);
4524       ShuffleBuilder.addInversedMask(E->ReorderIndices);
4525       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4526       NewV = ShuffleBuilder.finalize(NewV);
4527       E->VectorizedValue = NewV;
4528       return NewV;
4529     }
4530     case Instruction::ZExt:
4531     case Instruction::SExt:
4532     case Instruction::FPToUI:
4533     case Instruction::FPToSI:
4534     case Instruction::FPExt:
4535     case Instruction::PtrToInt:
4536     case Instruction::IntToPtr:
4537     case Instruction::SIToFP:
4538     case Instruction::UIToFP:
4539     case Instruction::Trunc:
4540     case Instruction::FPTrunc:
4541     case Instruction::BitCast: {
4542       setInsertPointAfterBundle(E);
4543 
4544       Value *InVec = vectorizeTree(E->getOperand(0));
4545 
4546       if (E->VectorizedValue) {
4547         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
4548         return E->VectorizedValue;
4549       }
4550 
4551       auto *CI = cast<CastInst>(VL0);
4552       Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
4553       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4554       V = ShuffleBuilder.finalize(V);
4555 
4556       E->VectorizedValue = V;
4557       ++NumVectorInstructions;
4558       return V;
4559     }
4560     case Instruction::FCmp:
4561     case Instruction::ICmp: {
4562       setInsertPointAfterBundle(E);
4563 
4564       Value *L = vectorizeTree(E->getOperand(0));
4565       Value *R = vectorizeTree(E->getOperand(1));
4566 
4567       if (E->VectorizedValue) {
4568         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
4569         return E->VectorizedValue;
4570       }
4571 
4572       CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
4573       Value *V = Builder.CreateCmp(P0, L, R);
4574       propagateIRFlags(V, E->Scalars, VL0);
4575       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4576       V = ShuffleBuilder.finalize(V);
4577 
4578       E->VectorizedValue = V;
4579       ++NumVectorInstructions;
4580       return V;
4581     }
4582     case Instruction::Select: {
4583       setInsertPointAfterBundle(E);
4584 
4585       Value *Cond = vectorizeTree(E->getOperand(0));
4586       Value *True = vectorizeTree(E->getOperand(1));
4587       Value *False = vectorizeTree(E->getOperand(2));
4588 
4589       if (E->VectorizedValue) {
4590         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
4591         return E->VectorizedValue;
4592       }
4593 
4594       Value *V = Builder.CreateSelect(Cond, True, False);
4595       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4596       V = ShuffleBuilder.finalize(V);
4597 
4598       E->VectorizedValue = V;
4599       ++NumVectorInstructions;
4600       return V;
4601     }
4602     case Instruction::FNeg: {
4603       setInsertPointAfterBundle(E);
4604 
4605       Value *Op = vectorizeTree(E->getOperand(0));
4606 
4607       if (E->VectorizedValue) {
4608         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
4609         return E->VectorizedValue;
4610       }
4611 
4612       Value *V = Builder.CreateUnOp(
4613           static_cast<Instruction::UnaryOps>(E->getOpcode()), Op);
4614       propagateIRFlags(V, E->Scalars, VL0);
4615       if (auto *I = dyn_cast<Instruction>(V))
4616         V = propagateMetadata(I, E->Scalars);
4617 
4618       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4619       V = ShuffleBuilder.finalize(V);
4620 
4621       E->VectorizedValue = V;
4622       ++NumVectorInstructions;
4623 
4624       return V;
4625     }
4626     case Instruction::Add:
4627     case Instruction::FAdd:
4628     case Instruction::Sub:
4629     case Instruction::FSub:
4630     case Instruction::Mul:
4631     case Instruction::FMul:
4632     case Instruction::UDiv:
4633     case Instruction::SDiv:
4634     case Instruction::FDiv:
4635     case Instruction::URem:
4636     case Instruction::SRem:
4637     case Instruction::FRem:
4638     case Instruction::Shl:
4639     case Instruction::LShr:
4640     case Instruction::AShr:
4641     case Instruction::And:
4642     case Instruction::Or:
4643     case Instruction::Xor: {
4644       setInsertPointAfterBundle(E);
4645 
4646       Value *LHS = vectorizeTree(E->getOperand(0));
4647       Value *RHS = vectorizeTree(E->getOperand(1));
4648 
4649       if (E->VectorizedValue) {
4650         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
4651         return E->VectorizedValue;
4652       }
4653 
4654       Value *V = Builder.CreateBinOp(
4655           static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS,
4656           RHS);
4657       propagateIRFlags(V, E->Scalars, VL0);
4658       if (auto *I = dyn_cast<Instruction>(V))
4659         V = propagateMetadata(I, E->Scalars);
4660 
4661       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4662       V = ShuffleBuilder.finalize(V);
4663 
4664       E->VectorizedValue = V;
4665       ++NumVectorInstructions;
4666 
4667       return V;
4668     }
4669     case Instruction::Load: {
4670       // Loads are inserted at the head of the tree because we don't want to
4671       // sink them all the way down past store instructions.
4672       bool IsReorder = E->updateStateIfReorder();
4673       if (IsReorder)
4674         VL0 = E->getMainOp();
4675       setInsertPointAfterBundle(E);
4676 
4677       LoadInst *LI = cast<LoadInst>(VL0);
4678       Instruction *NewLI;
4679       unsigned AS = LI->getPointerAddressSpace();
4680       Value *PO = LI->getPointerOperand();
4681       if (E->State == TreeEntry::Vectorize) {
4682 
4683         Value *VecPtr = Builder.CreateBitCast(PO, VecTy->getPointerTo(AS));
4684 
4685         // The pointer operand uses an in-tree scalar so we add the new BitCast
4686         // to ExternalUses list to make sure that an extract will be generated
4687         // in the future.
4688         if (getTreeEntry(PO))
4689           ExternalUses.emplace_back(PO, cast<User>(VecPtr), 0);
4690 
4691         NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign());
4692       } else {
4693         assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state");
4694         Value *VecPtr = vectorizeTree(E->getOperand(0));
4695         // Use the minimum alignment of the gathered loads.
4696         Align CommonAlignment = LI->getAlign();
4697         for (Value *V : E->Scalars)
4698           CommonAlignment =
4699               commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign());
4700         NewLI = Builder.CreateMaskedGather(VecPtr, CommonAlignment);
4701       }
4702       Value *V = propagateMetadata(NewLI, E->Scalars);
4703 
4704       ShuffleBuilder.addInversedMask(E->ReorderIndices);
4705       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4706       V = ShuffleBuilder.finalize(V);
4707       E->VectorizedValue = V;
4708       ++NumVectorInstructions;
4709       return V;
4710     }
4711     case Instruction::Store: {
4712       bool IsReorder = !E->ReorderIndices.empty();
4713       auto *SI = cast<StoreInst>(
4714           IsReorder ? E->Scalars[E->ReorderIndices.front()] : VL0);
4715       unsigned AS = SI->getPointerAddressSpace();
4716 
4717       setInsertPointAfterBundle(E);
4718 
4719       Value *VecValue = vectorizeTree(E->getOperand(0));
4720       ShuffleBuilder.addMask(E->ReorderIndices);
4721       VecValue = ShuffleBuilder.finalize(VecValue);
4722 
4723       Value *ScalarPtr = SI->getPointerOperand();
4724       Value *VecPtr = Builder.CreateBitCast(
4725           ScalarPtr, VecValue->getType()->getPointerTo(AS));
4726       StoreInst *ST = Builder.CreateAlignedStore(VecValue, VecPtr,
4727                                                  SI->getAlign());
4728 
4729       // The pointer operand uses an in-tree scalar, so add the new BitCast to
4730       // ExternalUses to make sure that an extract will be generated in the
4731       // future.
4732       if (getTreeEntry(ScalarPtr))
4733         ExternalUses.push_back(ExternalUser(ScalarPtr, cast<User>(VecPtr), 0));
4734 
4735       Value *V = propagateMetadata(ST, E->Scalars);
4736 
4737       E->VectorizedValue = V;
4738       ++NumVectorInstructions;
4739       return V;
4740     }
4741     case Instruction::GetElementPtr: {
4742       setInsertPointAfterBundle(E);
4743 
4744       Value *Op0 = vectorizeTree(E->getOperand(0));
4745 
4746       std::vector<Value *> OpVecs;
4747       for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
4748            ++j) {
4749         ValueList &VL = E->getOperand(j);
4750         // Need to cast all elements to the same type before vectorization to
4751         // avoid crash.
4752         Type *VL0Ty = VL0->getOperand(j)->getType();
4753         Type *Ty = llvm::all_of(
4754                        VL, [VL0Ty](Value *V) { return VL0Ty == V->getType(); })
4755                        ? VL0Ty
4756                        : DL->getIndexType(cast<GetElementPtrInst>(VL0)
4757                                               ->getPointerOperandType()
4758                                               ->getScalarType());
4759         for (Value *&V : VL) {
4760           auto *CI = cast<ConstantInt>(V);
4761           V = ConstantExpr::getIntegerCast(CI, Ty,
4762                                            CI->getValue().isSignBitSet());
4763         }
4764         Value *OpVec = vectorizeTree(VL);
4765         OpVecs.push_back(OpVec);
4766       }
4767 
4768       Value *V = Builder.CreateGEP(
4769           cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs);
4770       if (Instruction *I = dyn_cast<Instruction>(V))
4771         V = propagateMetadata(I, E->Scalars);
4772 
4773       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4774       V = ShuffleBuilder.finalize(V);
4775 
4776       E->VectorizedValue = V;
4777       ++NumVectorInstructions;
4778 
4779       return V;
4780     }
4781     case Instruction::Call: {
4782       CallInst *CI = cast<CallInst>(VL0);
4783       setInsertPointAfterBundle(E);
4784 
4785       Intrinsic::ID IID  = Intrinsic::not_intrinsic;
4786       if (Function *FI = CI->getCalledFunction())
4787         IID = FI->getIntrinsicID();
4788 
4789       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4790 
4791       auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
4792       bool UseIntrinsic = ID != Intrinsic::not_intrinsic &&
4793                           VecCallCosts.first <= VecCallCosts.second;
4794 
4795       Value *ScalarArg = nullptr;
4796       std::vector<Value *> OpVecs;
4797       for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
4798         ValueList OpVL;
4799         // Some intrinsics have scalar arguments. This argument should not be
4800         // vectorized.
4801         if (UseIntrinsic && hasVectorInstrinsicScalarOpd(IID, j)) {
4802           CallInst *CEI = cast<CallInst>(VL0);
4803           ScalarArg = CEI->getArgOperand(j);
4804           OpVecs.push_back(CEI->getArgOperand(j));
4805           continue;
4806         }
4807 
4808         Value *OpVec = vectorizeTree(E->getOperand(j));
4809         LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
4810         OpVecs.push_back(OpVec);
4811       }
4812 
4813       Function *CF;
4814       if (!UseIntrinsic) {
4815         VFShape Shape =
4816             VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
4817                                   VecTy->getNumElements())),
4818                          false /*HasGlobalPred*/);
4819         CF = VFDatabase(*CI).getVectorizedFunction(Shape);
4820       } else {
4821         Type *Tys[] = {FixedVectorType::get(CI->getType(), E->Scalars.size())};
4822         CF = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
4823       }
4824 
4825       SmallVector<OperandBundleDef, 1> OpBundles;
4826       CI->getOperandBundlesAsDefs(OpBundles);
4827       Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);
4828 
4829       // The scalar argument uses an in-tree scalar so we add the new vectorized
4830       // call to ExternalUses list to make sure that an extract will be
4831       // generated in the future.
4832       if (ScalarArg && getTreeEntry(ScalarArg))
4833         ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));
4834 
4835       propagateIRFlags(V, E->Scalars, VL0);
4836       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4837       V = ShuffleBuilder.finalize(V);
4838 
4839       E->VectorizedValue = V;
4840       ++NumVectorInstructions;
4841       return V;
4842     }
4843     case Instruction::ShuffleVector: {
4844       assert(E->isAltShuffle() &&
4845              ((Instruction::isBinaryOp(E->getOpcode()) &&
4846                Instruction::isBinaryOp(E->getAltOpcode())) ||
4847               (Instruction::isCast(E->getOpcode()) &&
4848                Instruction::isCast(E->getAltOpcode()))) &&
4849              "Invalid Shuffle Vector Operand");
4850 
4851       Value *LHS = nullptr, *RHS = nullptr;
4852       if (Instruction::isBinaryOp(E->getOpcode())) {
4853         setInsertPointAfterBundle(E);
4854         LHS = vectorizeTree(E->getOperand(0));
4855         RHS = vectorizeTree(E->getOperand(1));
4856       } else {
4857         setInsertPointAfterBundle(E);
4858         LHS = vectorizeTree(E->getOperand(0));
4859       }
4860 
4861       if (E->VectorizedValue) {
4862         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
4863         return E->VectorizedValue;
4864       }
4865 
4866       Value *V0, *V1;
4867       if (Instruction::isBinaryOp(E->getOpcode())) {
4868         V0 = Builder.CreateBinOp(
4869             static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
4870         V1 = Builder.CreateBinOp(
4871             static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS);
4872       } else {
4873         V0 = Builder.CreateCast(
4874             static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy);
4875         V1 = Builder.CreateCast(
4876             static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy);
4877       }
4878 
4879       // Create shuffle to take alternate operations from the vector.
4880       // Also, gather up main and alt scalar ops to propagate IR flags to
4881       // each vector operation.
4882       ValueList OpScalars, AltScalars;
4883       unsigned e = E->Scalars.size();
4884       SmallVector<int, 8> Mask(e);
4885       for (unsigned i = 0; i < e; ++i) {
4886         auto *OpInst = cast<Instruction>(E->Scalars[i]);
4887         assert(E->isOpcodeOrAlt(OpInst) && "Unexpected main/alternate opcode");
4888         if (OpInst->getOpcode() == E->getAltOpcode()) {
4889           Mask[i] = e + i;
4890           AltScalars.push_back(E->Scalars[i]);
4891         } else {
4892           Mask[i] = i;
4893           OpScalars.push_back(E->Scalars[i]);
4894         }
4895       }
4896 
4897       propagateIRFlags(V0, OpScalars);
4898       propagateIRFlags(V1, AltScalars);
4899 
4900       Value *V = Builder.CreateShuffleVector(V0, V1, Mask);
4901       if (Instruction *I = dyn_cast<Instruction>(V))
4902         V = propagateMetadata(I, E->Scalars);
4903       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4904       V = ShuffleBuilder.finalize(V);
4905 
4906       E->VectorizedValue = V;
4907       ++NumVectorInstructions;
4908 
4909       return V;
4910     }
4911     default:
4912     llvm_unreachable("unknown inst");
4913   }
4914   return nullptr;
4915 }
4916 
4917 Value *BoUpSLP::vectorizeTree() {
4918   ExtraValueToDebugLocsMap ExternallyUsedValues;
4919   return vectorizeTree(ExternallyUsedValues);
4920 }
4921 
4922 Value *
4923 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
4924   // All blocks must be scheduled before any instructions are inserted.
4925   for (auto &BSIter : BlocksSchedules) {
4926     scheduleBlock(BSIter.second.get());
4927   }
4928 
4929   Builder.SetInsertPoint(&F->getEntryBlock().front());
4930   auto *VectorRoot = vectorizeTree(VectorizableTree[0].get());
4931 
4932   // If the vectorized tree can be rewritten in a smaller type, we truncate the
4933   // vectorized root. InstCombine will then rewrite the entire expression. We
4934   // sign extend the extracted values below.
4935   auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
4936   if (MinBWs.count(ScalarRoot)) {
4937     if (auto *I = dyn_cast<Instruction>(VectorRoot)) {
4938       // If current instr is a phi and not the last phi, insert it after the
4939       // last phi node.
4940       if (isa<PHINode>(I))
4941         Builder.SetInsertPoint(&*I->getParent()->getFirstInsertionPt());
4942       else
4943         Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
4944     }
4945     auto BundleWidth = VectorizableTree[0]->Scalars.size();
4946     auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
4947     auto *VecTy = FixedVectorType::get(MinTy, BundleWidth);
4948     auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
4949     VectorizableTree[0]->VectorizedValue = Trunc;
4950   }
4951 
4952   LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
4953                     << " values .\n");
4954 
4955   // If necessary, sign-extend or zero-extend ScalarRoot to the larger type
4956   // specified by ScalarType.
4957   auto extend = [&](Value *ScalarRoot, Value *Ex, Type *ScalarType) {
4958     if (!MinBWs.count(ScalarRoot))
4959       return Ex;
4960     if (MinBWs[ScalarRoot].second)
4961       return Builder.CreateSExt(Ex, ScalarType);
4962     return Builder.CreateZExt(Ex, ScalarType);
4963   };
4964 
4965   // Extract all of the elements with the external uses.
4966   for (const auto &ExternalUse : ExternalUses) {
4967     Value *Scalar = ExternalUse.Scalar;
4968     llvm::User *User = ExternalUse.User;
4969 
4970     // Skip users that we already RAUW. This happens when one instruction
4971     // has multiple uses of the same value.
4972     if (User && !is_contained(Scalar->users(), User))
4973       continue;
4974     TreeEntry *E = getTreeEntry(Scalar);
4975     assert(E && "Invalid scalar");
4976     assert(E->State != TreeEntry::NeedToGather &&
4977            "Extracting from a gather list");
4978 
4979     Value *Vec = E->VectorizedValue;
4980     assert(Vec && "Can't find vectorizable value");
4981 
4982     Value *Lane = Builder.getInt32(ExternalUse.Lane);
4983     // If User == nullptr, the Scalar is used as extra arg. Generate
4984     // ExtractElement instruction and update the record for this scalar in
4985     // ExternallyUsedValues.
4986     if (!User) {
4987       assert(ExternallyUsedValues.count(Scalar) &&
4988              "Scalar with nullptr as an external user must be registered in "
4989              "ExternallyUsedValues map");
4990       if (auto *VecI = dyn_cast<Instruction>(Vec)) {
4991         Builder.SetInsertPoint(VecI->getParent(),
4992                                std::next(VecI->getIterator()));
4993       } else {
4994         Builder.SetInsertPoint(&F->getEntryBlock().front());
4995       }
4996       Value *Ex = Builder.CreateExtractElement(Vec, Lane);
4997       Ex = extend(ScalarRoot, Ex, Scalar->getType());
4998       CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
4999       auto &Locs = ExternallyUsedValues[Scalar];
5000       ExternallyUsedValues.insert({Ex, Locs});
5001       ExternallyUsedValues.erase(Scalar);
5002       // Required to update internally referenced instructions.
5003       Scalar->replaceAllUsesWith(Ex);
5004       continue;
5005     }
5006 
5007     // Generate extracts for out-of-tree users.
5008     // Find the insertion point for the extractelement lane.
5009     if (auto *VecI = dyn_cast<Instruction>(Vec)) {
5010       if (PHINode *PH = dyn_cast<PHINode>(User)) {
5011         for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
5012           if (PH->getIncomingValue(i) == Scalar) {
5013             Instruction *IncomingTerminator =
5014                 PH->getIncomingBlock(i)->getTerminator();
5015             if (isa<CatchSwitchInst>(IncomingTerminator)) {
5016               Builder.SetInsertPoint(VecI->getParent(),
5017                                      std::next(VecI->getIterator()));
5018             } else {
5019               Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
5020             }
5021             Value *Ex = Builder.CreateExtractElement(Vec, Lane);
5022             Ex = extend(ScalarRoot, Ex, Scalar->getType());
5023             CSEBlocks.insert(PH->getIncomingBlock(i));
5024             PH->setOperand(i, Ex);
5025           }
5026         }
5027       } else {
5028         Builder.SetInsertPoint(cast<Instruction>(User));
5029         Value *Ex = Builder.CreateExtractElement(Vec, Lane);
5030         Ex = extend(ScalarRoot, Ex, Scalar->getType());
5031         CSEBlocks.insert(cast<Instruction>(User)->getParent());
5032         User->replaceUsesOfWith(Scalar, Ex);
5033       }
5034     } else {
5035       Builder.SetInsertPoint(&F->getEntryBlock().front());
5036       Value *Ex = Builder.CreateExtractElement(Vec, Lane);
5037       Ex = extend(ScalarRoot, Ex, Scalar->getType());
5038       CSEBlocks.insert(&F->getEntryBlock());
5039       User->replaceUsesOfWith(Scalar, Ex);
5040     }
5041 
5042     LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
5043   }
5044 
5045   // For each vectorized value:
5046   for (auto &TEPtr : VectorizableTree) {
5047     TreeEntry *Entry = TEPtr.get();
5048 
5049     // No need to handle users of gathered values.
5050     if (Entry->State == TreeEntry::NeedToGather)
5051       continue;
5052 
5053     assert(Entry->VectorizedValue && "Can't find vectorizable value");
5054 
5055     // For each lane:
5056     for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
5057       Value *Scalar = Entry->Scalars[Lane];
5058 
5059 #ifndef NDEBUG
5060       Type *Ty = Scalar->getType();
5061       if (!Ty->isVoidTy()) {
5062         for (User *U : Scalar->users()) {
5063           LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
5064 
5065           // It is legal to delete users in the ignorelist.
5066           assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) &&
5067                  "Deleting out-of-tree value");
5068         }
5069       }
5070 #endif
5071       LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
5072       eraseInstruction(cast<Instruction>(Scalar));
5073     }
5074   }
5075 
5076   Builder.ClearInsertionPoint();
5077   InstrElementSize.clear();
5078 
5079   return VectorizableTree[0]->VectorizedValue;
5080 }
5081 
5082 void BoUpSLP::optimizeGatherSequence() {
5083   LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
5084                     << " gather sequences instructions.\n");
5085   // LICM InsertElementInst sequences.
5086   for (Instruction *I : GatherSeq) {
5087     if (isDeleted(I))
5088       continue;
5089 
5090     // Check if this block is inside a loop.
5091     Loop *L = LI->getLoopFor(I->getParent());
5092     if (!L)
5093       continue;
5094 
5095     // Check if it has a preheader.
5096     BasicBlock *PreHeader = L->getLoopPreheader();
5097     if (!PreHeader)
5098       continue;
5099 
5100     // If the vector or the element that we insert into it are
5101     // instructions that are defined in this basic block then we can't
5102     // hoist this instruction.
5103     auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
5104     auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
5105     if (Op0 && L->contains(Op0))
5106       continue;
5107     if (Op1 && L->contains(Op1))
5108       continue;
5109 
5110     // We can hoist this instruction. Move it to the pre-header.
5111     I->moveBefore(PreHeader->getTerminator());
5112   }
5113 
5114   // Make a list of all reachable blocks in our CSE queue.
5115   SmallVector<const DomTreeNode *, 8> CSEWorkList;
5116   CSEWorkList.reserve(CSEBlocks.size());
5117   for (BasicBlock *BB : CSEBlocks)
5118     if (DomTreeNode *N = DT->getNode(BB)) {
5119       assert(DT->isReachableFromEntry(N));
5120       CSEWorkList.push_back(N);
5121     }
5122 
5123   // Sort blocks by domination. This ensures we visit a block after all blocks
5124   // dominating it are visited.
5125   llvm::stable_sort(CSEWorkList,
5126                     [this](const DomTreeNode *A, const DomTreeNode *B) {
5127                       return DT->properlyDominates(A, B);
5128                     });
5129 
5130   // Perform O(N^2) search over the gather sequences and merge identical
5131   // instructions. TODO: We can further optimize this scan if we split the
5132   // instructions into different buckets based on the insert lane.
5133   SmallVector<Instruction *, 16> Visited;
5134   for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
5135     assert(*I &&
5136            (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
5137            "Worklist not sorted properly!");
5138     BasicBlock *BB = (*I)->getBlock();
5139     // For all instructions in blocks containing gather sequences:
5140     for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
5141       Instruction *In = &*it++;
5142       if (isDeleted(In))
5143         continue;
5144       if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
5145         continue;
5146 
5147       // Check if we can replace this instruction with any of the
5148       // visited instructions.
5149       for (Instruction *v : Visited) {
5150         if (In->isIdenticalTo(v) &&
5151             DT->dominates(v->getParent(), In->getParent())) {
5152           In->replaceAllUsesWith(v);
5153           eraseInstruction(In);
5154           In = nullptr;
5155           break;
5156         }
5157       }
5158       if (In) {
5159         assert(!is_contained(Visited, In));
5160         Visited.push_back(In);
5161       }
5162     }
5163   }
5164   CSEBlocks.clear();
5165   GatherSeq.clear();
5166 }
5167 
5168 // Groups the instructions to a bundle (which is then a single scheduling entity)
5169 // and schedules instructions until the bundle gets ready.
5170 Optional<BoUpSLP::ScheduleData *>
5171 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
5172                                             const InstructionsState &S) {
5173   if (isa<PHINode>(S.OpValue))
5174     return nullptr;
5175 
5176   // Initialize the instruction bundle.
5177   Instruction *OldScheduleEnd = ScheduleEnd;
5178   ScheduleData *PrevInBundle = nullptr;
5179   ScheduleData *Bundle = nullptr;
5180   bool ReSchedule = false;
5181   LLVM_DEBUG(dbgs() << "SLP:  bundle: " << *S.OpValue << "\n");
5182 
5183   auto &&TryScheduleBundle = [this, OldScheduleEnd, SLP](bool ReSchedule,
5184                                                          ScheduleData *Bundle) {
5185     // The scheduling region got new instructions at the lower end (or it is a
5186     // new region for the first bundle). This makes it necessary to
5187     // recalculate all dependencies.
5188     // It is seldom that this needs to be done a second time after adding the
5189     // initial bundle to the region.
5190     if (ScheduleEnd != OldScheduleEnd) {
5191       for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode())
5192         doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); });
5193       ReSchedule = true;
5194     }
5195     if (ReSchedule) {
5196       resetSchedule();
5197       initialFillReadyList(ReadyInsts);
5198     }
5199     if (Bundle) {
5200       LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle
5201                         << " in block " << BB->getName() << "\n");
5202       calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP);
5203     }
5204 
5205     // Now try to schedule the new bundle or (if no bundle) just calculate
5206     // dependencies. As soon as the bundle is "ready" it means that there are no
5207     // cyclic dependencies and we can schedule it. Note that's important that we
5208     // don't "schedule" the bundle yet (see cancelScheduling).
5209     while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) &&
5210            !ReadyInsts.empty()) {
5211       ScheduleData *Picked = ReadyInsts.pop_back_val();
5212       if (Picked->isSchedulingEntity() && Picked->isReady())
5213         schedule(Picked, ReadyInsts);
5214     }
5215   };
5216 
5217   // Make sure that the scheduling region contains all
5218   // instructions of the bundle.
5219   for (Value *V : VL) {
5220     if (!extendSchedulingRegion(V, S)) {
5221       // If the scheduling region got new instructions at the lower end (or it
5222       // is a new region for the first bundle). This makes it necessary to
5223       // recalculate all dependencies.
5224       // Otherwise the compiler may crash trying to incorrectly calculate
5225       // dependencies and emit instruction in the wrong order at the actual
5226       // scheduling.
5227       TryScheduleBundle(/*ReSchedule=*/false, nullptr);
5228       return None;
5229     }
5230   }
5231 
5232   for (Value *V : VL) {
5233     ScheduleData *BundleMember = getScheduleData(V);
5234     assert(BundleMember &&
5235            "no ScheduleData for bundle member (maybe not in same basic block)");
5236     if (BundleMember->IsScheduled) {
5237       // A bundle member was scheduled as single instruction before and now
5238       // needs to be scheduled as part of the bundle. We just get rid of the
5239       // existing schedule.
5240       LLVM_DEBUG(dbgs() << "SLP:  reset schedule because " << *BundleMember
5241                         << " was already scheduled\n");
5242       ReSchedule = true;
5243     }
5244     assert(BundleMember->isSchedulingEntity() &&
5245            "bundle member already part of other bundle");
5246     if (PrevInBundle) {
5247       PrevInBundle->NextInBundle = BundleMember;
5248     } else {
5249       Bundle = BundleMember;
5250     }
5251     BundleMember->UnscheduledDepsInBundle = 0;
5252     Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;
5253 
5254     // Group the instructions to a bundle.
5255     BundleMember->FirstInBundle = Bundle;
5256     PrevInBundle = BundleMember;
5257   }
5258   assert(Bundle && "Failed to find schedule bundle");
5259   TryScheduleBundle(ReSchedule, Bundle);
5260   if (!Bundle->isReady()) {
5261     cancelScheduling(VL, S.OpValue);
5262     return None;
5263   }
5264   return Bundle;
5265 }
5266 
5267 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
5268                                                 Value *OpValue) {
5269   if (isa<PHINode>(OpValue))
5270     return;
5271 
5272   ScheduleData *Bundle = getScheduleData(OpValue);
5273   LLVM_DEBUG(dbgs() << "SLP:  cancel scheduling of " << *Bundle << "\n");
5274   assert(!Bundle->IsScheduled &&
5275          "Can't cancel bundle which is already scheduled");
5276   assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
5277          "tried to unbundle something which is not a bundle");
5278 
5279   // Un-bundle: make single instructions out of the bundle.
5280   ScheduleData *BundleMember = Bundle;
5281   while (BundleMember) {
5282     assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
5283     BundleMember->FirstInBundle = BundleMember;
5284     ScheduleData *Next = BundleMember->NextInBundle;
5285     BundleMember->NextInBundle = nullptr;
5286     BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
5287     if (BundleMember->UnscheduledDepsInBundle == 0) {
5288       ReadyInsts.insert(BundleMember);
5289     }
5290     BundleMember = Next;
5291   }
5292 }
5293 
5294 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
5295   // Allocate a new ScheduleData for the instruction.
5296   if (ChunkPos >= ChunkSize) {
5297     ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize));
5298     ChunkPos = 0;
5299   }
5300   return &(ScheduleDataChunks.back()[ChunkPos++]);
5301 }
5302 
5303 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V,
5304                                                       const InstructionsState &S) {
5305   if (getScheduleData(V, isOneOf(S, V)))
5306     return true;
5307   Instruction *I = dyn_cast<Instruction>(V);
5308   assert(I && "bundle member must be an instruction");
5309   assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
5310   auto &&CheckSheduleForI = [this, &S](Instruction *I) -> bool {
5311     ScheduleData *ISD = getScheduleData(I);
5312     if (!ISD)
5313       return false;
5314     assert(isInSchedulingRegion(ISD) &&
5315            "ScheduleData not in scheduling region");
5316     ScheduleData *SD = allocateScheduleDataChunks();
5317     SD->Inst = I;
5318     SD->init(SchedulingRegionID, S.OpValue);
5319     ExtraScheduleDataMap[I][S.OpValue] = SD;
5320     return true;
5321   };
5322   if (CheckSheduleForI(I))
5323     return true;
5324   if (!ScheduleStart) {
5325     // It's the first instruction in the new region.
5326     initScheduleData(I, I->getNextNode(), nullptr, nullptr);
5327     ScheduleStart = I;
5328     ScheduleEnd = I->getNextNode();
5329     if (isOneOf(S, I) != I)
5330       CheckSheduleForI(I);
5331     assert(ScheduleEnd && "tried to vectorize a terminator?");
5332     LLVM_DEBUG(dbgs() << "SLP:  initialize schedule region to " << *I << "\n");
5333     return true;
5334   }
5335   // Search up and down at the same time, because we don't know if the new
5336   // instruction is above or below the existing scheduling region.
5337   BasicBlock::reverse_iterator UpIter =
5338       ++ScheduleStart->getIterator().getReverse();
5339   BasicBlock::reverse_iterator UpperEnd = BB->rend();
5340   BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
5341   BasicBlock::iterator LowerEnd = BB->end();
5342   while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I &&
5343          &*DownIter != I) {
5344     if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
5345       LLVM_DEBUG(dbgs() << "SLP:  exceeded schedule region size limit\n");
5346       return false;
5347     }
5348 
5349     ++UpIter;
5350     ++DownIter;
5351   }
5352   if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) {
5353     assert(I->getParent() == ScheduleStart->getParent() &&
5354            "Instruction is in wrong basic block.");
5355     initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
5356     ScheduleStart = I;
5357     if (isOneOf(S, I) != I)
5358       CheckSheduleForI(I);
5359     LLVM_DEBUG(dbgs() << "SLP:  extend schedule region start to " << *I
5360                       << "\n");
5361     return true;
5362   }
5363   assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) &&
5364          "Expected to reach top of the basic block or instruction down the "
5365          "lower end.");
5366   assert(I->getParent() == ScheduleEnd->getParent() &&
5367          "Instruction is in wrong basic block.");
5368   initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
5369                    nullptr);
5370   ScheduleEnd = I->getNextNode();
5371   if (isOneOf(S, I) != I)
5372     CheckSheduleForI(I);
5373   assert(ScheduleEnd && "tried to vectorize a terminator?");
5374   LLVM_DEBUG(dbgs() << "SLP:  extend schedule region end to " << *I << "\n");
5375   return true;
5376 }
5377 
5378 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
5379                                                 Instruction *ToI,
5380                                                 ScheduleData *PrevLoadStore,
5381                                                 ScheduleData *NextLoadStore) {
5382   ScheduleData *CurrentLoadStore = PrevLoadStore;
5383   for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
5384     ScheduleData *SD = ScheduleDataMap[I];
5385     if (!SD) {
5386       SD = allocateScheduleDataChunks();
5387       ScheduleDataMap[I] = SD;
5388       SD->Inst = I;
5389     }
5390     assert(!isInSchedulingRegion(SD) &&
5391            "new ScheduleData already in scheduling region");
5392     SD->init(SchedulingRegionID, I);
5393 
5394     if (I->mayReadOrWriteMemory() &&
5395         (!isa<IntrinsicInst>(I) ||
5396          (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect &&
5397           cast<IntrinsicInst>(I)->getIntrinsicID() !=
5398               Intrinsic::pseudoprobe))) {
5399       // Update the linked list of memory accessing instructions.
5400       if (CurrentLoadStore) {
5401         CurrentLoadStore->NextLoadStore = SD;
5402       } else {
5403         FirstLoadStoreInRegion = SD;
5404       }
5405       CurrentLoadStore = SD;
5406     }
5407   }
5408   if (NextLoadStore) {
5409     if (CurrentLoadStore)
5410       CurrentLoadStore->NextLoadStore = NextLoadStore;
5411   } else {
5412     LastLoadStoreInRegion = CurrentLoadStore;
5413   }
5414 }
5415 
5416 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
5417                                                      bool InsertInReadyList,
5418                                                      BoUpSLP *SLP) {
5419   assert(SD->isSchedulingEntity());
5420 
5421   SmallVector<ScheduleData *, 10> WorkList;
5422   WorkList.push_back(SD);
5423 
5424   while (!WorkList.empty()) {
5425     ScheduleData *SD = WorkList.pop_back_val();
5426 
5427     ScheduleData *BundleMember = SD;
5428     while (BundleMember) {
5429       assert(isInSchedulingRegion(BundleMember));
5430       if (!BundleMember->hasValidDependencies()) {
5431 
5432         LLVM_DEBUG(dbgs() << "SLP:       update deps of " << *BundleMember
5433                           << "\n");
5434         BundleMember->Dependencies = 0;
5435         BundleMember->resetUnscheduledDeps();
5436 
5437         // Handle def-use chain dependencies.
5438         if (BundleMember->OpValue != BundleMember->Inst) {
5439           ScheduleData *UseSD = getScheduleData(BundleMember->Inst);
5440           if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
5441             BundleMember->Dependencies++;
5442             ScheduleData *DestBundle = UseSD->FirstInBundle;
5443             if (!DestBundle->IsScheduled)
5444               BundleMember->incrementUnscheduledDeps(1);
5445             if (!DestBundle->hasValidDependencies())
5446               WorkList.push_back(DestBundle);
5447           }
5448         } else {
5449           for (User *U : BundleMember->Inst->users()) {
5450             if (isa<Instruction>(U)) {
5451               ScheduleData *UseSD = getScheduleData(U);
5452               if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
5453                 BundleMember->Dependencies++;
5454                 ScheduleData *DestBundle = UseSD->FirstInBundle;
5455                 if (!DestBundle->IsScheduled)
5456                   BundleMember->incrementUnscheduledDeps(1);
5457                 if (!DestBundle->hasValidDependencies())
5458                   WorkList.push_back(DestBundle);
5459               }
5460             } else {
5461               // I'm not sure if this can ever happen. But we need to be safe.
5462               // This lets the instruction/bundle never be scheduled and
5463               // eventually disable vectorization.
5464               BundleMember->Dependencies++;
5465               BundleMember->incrementUnscheduledDeps(1);
5466             }
5467           }
5468         }
5469 
5470         // Handle the memory dependencies.
5471         ScheduleData *DepDest = BundleMember->NextLoadStore;
5472         if (DepDest) {
5473           Instruction *SrcInst = BundleMember->Inst;
5474           MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
5475           bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
5476           unsigned numAliased = 0;
5477           unsigned DistToSrc = 1;
5478 
5479           while (DepDest) {
5480             assert(isInSchedulingRegion(DepDest));
5481 
5482             // We have two limits to reduce the complexity:
5483             // 1) AliasedCheckLimit: It's a small limit to reduce calls to
5484             //    SLP->isAliased (which is the expensive part in this loop).
5485             // 2) MaxMemDepDistance: It's for very large blocks and it aborts
5486             //    the whole loop (even if the loop is fast, it's quadratic).
5487             //    It's important for the loop break condition (see below) to
5488             //    check this limit even between two read-only instructions.
5489             if (DistToSrc >= MaxMemDepDistance ||
5490                     ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
5491                      (numAliased >= AliasedCheckLimit ||
5492                       SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
5493 
5494               // We increment the counter only if the locations are aliased
5495               // (instead of counting all alias checks). This gives a better
5496               // balance between reduced runtime and accurate dependencies.
5497               numAliased++;
5498 
5499               DepDest->MemoryDependencies.push_back(BundleMember);
5500               BundleMember->Dependencies++;
5501               ScheduleData *DestBundle = DepDest->FirstInBundle;
5502               if (!DestBundle->IsScheduled) {
5503                 BundleMember->incrementUnscheduledDeps(1);
5504               }
5505               if (!DestBundle->hasValidDependencies()) {
5506                 WorkList.push_back(DestBundle);
5507               }
5508             }
5509             DepDest = DepDest->NextLoadStore;
5510 
5511             // Example, explaining the loop break condition: Let's assume our
5512             // starting instruction is i0 and MaxMemDepDistance = 3.
5513             //
5514             //                      +--------v--v--v
5515             //             i0,i1,i2,i3,i4,i5,i6,i7,i8
5516             //             +--------^--^--^
5517             //
5518             // MaxMemDepDistance let us stop alias-checking at i3 and we add
5519             // dependencies from i0 to i3,i4,.. (even if they are not aliased).
5520             // Previously we already added dependencies from i3 to i6,i7,i8
5521             // (because of MaxMemDepDistance). As we added a dependency from
5522             // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
5523             // and we can abort this loop at i6.
5524             if (DistToSrc >= 2 * MaxMemDepDistance)
5525               break;
5526             DistToSrc++;
5527           }
5528         }
5529       }
5530       BundleMember = BundleMember->NextInBundle;
5531     }
5532     if (InsertInReadyList && SD->isReady()) {
5533       ReadyInsts.push_back(SD);
5534       LLVM_DEBUG(dbgs() << "SLP:     gets ready on update: " << *SD->Inst
5535                         << "\n");
5536     }
5537   }
5538 }
5539 
5540 void BoUpSLP::BlockScheduling::resetSchedule() {
5541   assert(ScheduleStart &&
5542          "tried to reset schedule on block which has not been scheduled");
5543   for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
5544     doForAllOpcodes(I, [&](ScheduleData *SD) {
5545       assert(isInSchedulingRegion(SD) &&
5546              "ScheduleData not in scheduling region");
5547       SD->IsScheduled = false;
5548       SD->resetUnscheduledDeps();
5549     });
5550   }
5551   ReadyInsts.clear();
5552 }
5553 
5554 void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
5555   if (!BS->ScheduleStart)
5556     return;
5557 
5558   LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
5559 
5560   BS->resetSchedule();
5561 
5562   // For the real scheduling we use a more sophisticated ready-list: it is
5563   // sorted by the original instruction location. This lets the final schedule
5564   // be as  close as possible to the original instruction order.
5565   struct ScheduleDataCompare {
5566     bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
5567       return SD2->SchedulingPriority < SD1->SchedulingPriority;
5568     }
5569   };
5570   std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
5571 
5572   // Ensure that all dependency data is updated and fill the ready-list with
5573   // initial instructions.
5574   int Idx = 0;
5575   int NumToSchedule = 0;
5576   for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
5577        I = I->getNextNode()) {
5578     BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) {
5579       assert(SD->isPartOfBundle() ==
5580                  (getTreeEntry(SD->Inst) != nullptr) &&
5581              "scheduler and vectorizer bundle mismatch");
5582       SD->FirstInBundle->SchedulingPriority = Idx++;
5583       if (SD->isSchedulingEntity()) {
5584         BS->calculateDependencies(SD, false, this);
5585         NumToSchedule++;
5586       }
5587     });
5588   }
5589   BS->initialFillReadyList(ReadyInsts);
5590 
5591   Instruction *LastScheduledInst = BS->ScheduleEnd;
5592 
5593   // Do the "real" scheduling.
5594   while (!ReadyInsts.empty()) {
5595     ScheduleData *picked = *ReadyInsts.begin();
5596     ReadyInsts.erase(ReadyInsts.begin());
5597 
5598     // Move the scheduled instruction(s) to their dedicated places, if not
5599     // there yet.
5600     ScheduleData *BundleMember = picked;
5601     while (BundleMember) {
5602       Instruction *pickedInst = BundleMember->Inst;
5603       if (LastScheduledInst->getNextNode() != pickedInst) {
5604         BS->BB->getInstList().remove(pickedInst);
5605         BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
5606                                      pickedInst);
5607       }
5608       LastScheduledInst = pickedInst;
5609       BundleMember = BundleMember->NextInBundle;
5610     }
5611 
5612     BS->schedule(picked, ReadyInsts);
5613     NumToSchedule--;
5614   }
5615   assert(NumToSchedule == 0 && "could not schedule all instructions");
5616 
5617   // Avoid duplicate scheduling of the block.
5618   BS->ScheduleStart = nullptr;
5619 }
5620 
5621 unsigned BoUpSLP::getVectorElementSize(Value *V) {
5622   // If V is a store, just return the width of the stored value (or value
5623   // truncated just before storing) without traversing the expression tree.
5624   // This is the common case.
5625   if (auto *Store = dyn_cast<StoreInst>(V)) {
5626     if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand()))
5627       return DL->getTypeSizeInBits(Trunc->getSrcTy());
5628     return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
5629   }
5630 
5631   auto E = InstrElementSize.find(V);
5632   if (E != InstrElementSize.end())
5633     return E->second;
5634 
5635   // If V is not a store, we can traverse the expression tree to find loads
5636   // that feed it. The type of the loaded value may indicate a more suitable
5637   // width than V's type. We want to base the vector element size on the width
5638   // of memory operations where possible.
5639   SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist;
5640   SmallPtrSet<Instruction *, 16> Visited;
5641   if (auto *I = dyn_cast<Instruction>(V)) {
5642     Worklist.emplace_back(I, I->getParent());
5643     Visited.insert(I);
5644   }
5645 
5646   // Traverse the expression tree in bottom-up order looking for loads. If we
5647   // encounter an instruction we don't yet handle, we give up.
5648   auto Width = 0u;
5649   while (!Worklist.empty()) {
5650     Instruction *I;
5651     BasicBlock *Parent;
5652     std::tie(I, Parent) = Worklist.pop_back_val();
5653 
5654     // We should only be looking at scalar instructions here. If the current
5655     // instruction has a vector type, skip.
5656     auto *Ty = I->getType();
5657     if (isa<VectorType>(Ty))
5658       continue;
5659 
5660     // If the current instruction is a load, update MaxWidth to reflect the
5661     // width of the loaded value.
5662     if (isa<LoadInst>(I) || isa<ExtractElementInst>(I) ||
5663         isa<ExtractValueInst>(I))
5664       Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty));
5665 
5666     // Otherwise, we need to visit the operands of the instruction. We only
5667     // handle the interesting cases from buildTree here. If an operand is an
5668     // instruction we haven't yet visited and from the same basic block as the
5669     // user or the use is a PHI node, we add it to the worklist.
5670     else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
5671              isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I) ||
5672              isa<UnaryOperator>(I)) {
5673       for (Use &U : I->operands())
5674         if (auto *J = dyn_cast<Instruction>(U.get()))
5675           if (Visited.insert(J).second &&
5676               (isa<PHINode>(I) || J->getParent() == Parent))
5677             Worklist.emplace_back(J, J->getParent());
5678     } else {
5679       break;
5680     }
5681   }
5682 
5683   // If we didn't encounter a memory access in the expression tree, or if we
5684   // gave up for some reason, just return the width of V. Otherwise, return the
5685   // maximum width we found.
5686   if (!Width) {
5687     if (auto *CI = dyn_cast<CmpInst>(V))
5688       V = CI->getOperand(0);
5689     Width = DL->getTypeSizeInBits(V->getType());
5690   }
5691 
5692   for (Instruction *I : Visited)
5693     InstrElementSize[I] = Width;
5694 
5695   return Width;
5696 }
5697 
5698 // Determine if a value V in a vectorizable expression Expr can be demoted to a
5699 // smaller type with a truncation. We collect the values that will be demoted
5700 // in ToDemote and additional roots that require investigating in Roots.
5701 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
5702                                   SmallVectorImpl<Value *> &ToDemote,
5703                                   SmallVectorImpl<Value *> &Roots) {
5704   // We can always demote constants.
5705   if (isa<Constant>(V)) {
5706     ToDemote.push_back(V);
5707     return true;
5708   }
5709 
5710   // If the value is not an instruction in the expression with only one use, it
5711   // cannot be demoted.
5712   auto *I = dyn_cast<Instruction>(V);
5713   if (!I || !I->hasOneUse() || !Expr.count(I))
5714     return false;
5715 
5716   switch (I->getOpcode()) {
5717 
5718   // We can always demote truncations and extensions. Since truncations can
5719   // seed additional demotion, we save the truncated value.
5720   case Instruction::Trunc:
5721     Roots.push_back(I->getOperand(0));
5722     break;
5723   case Instruction::ZExt:
5724   case Instruction::SExt:
5725     break;
5726 
5727   // We can demote certain binary operations if we can demote both of their
5728   // operands.
5729   case Instruction::Add:
5730   case Instruction::Sub:
5731   case Instruction::Mul:
5732   case Instruction::And:
5733   case Instruction::Or:
5734   case Instruction::Xor:
5735     if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
5736         !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
5737       return false;
5738     break;
5739 
5740   // We can demote selects if we can demote their true and false values.
5741   case Instruction::Select: {
5742     SelectInst *SI = cast<SelectInst>(I);
5743     if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
5744         !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
5745       return false;
5746     break;
5747   }
5748 
5749   // We can demote phis if we can demote all their incoming operands. Note that
5750   // we don't need to worry about cycles since we ensure single use above.
5751   case Instruction::PHI: {
5752     PHINode *PN = cast<PHINode>(I);
5753     for (Value *IncValue : PN->incoming_values())
5754       if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
5755         return false;
5756     break;
5757   }
5758 
5759   // Otherwise, conservatively give up.
5760   default:
5761     return false;
5762   }
5763 
5764   // Record the value that we can demote.
5765   ToDemote.push_back(V);
5766   return true;
5767 }
5768 
5769 void BoUpSLP::computeMinimumValueSizes() {
5770   // If there are no external uses, the expression tree must be rooted by a
5771   // store. We can't demote in-memory values, so there is nothing to do here.
5772   if (ExternalUses.empty())
5773     return;
5774 
5775   // We only attempt to truncate integer expressions.
5776   auto &TreeRoot = VectorizableTree[0]->Scalars;
5777   auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
5778   if (!TreeRootIT)
5779     return;
5780 
5781   // If the expression is not rooted by a store, these roots should have
5782   // external uses. We will rely on InstCombine to rewrite the expression in
5783   // the narrower type. However, InstCombine only rewrites single-use values.
5784   // This means that if a tree entry other than a root is used externally, it
5785   // must have multiple uses and InstCombine will not rewrite it. The code
5786   // below ensures that only the roots are used externally.
5787   SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
5788   for (auto &EU : ExternalUses)
5789     if (!Expr.erase(EU.Scalar))
5790       return;
5791   if (!Expr.empty())
5792     return;
5793 
5794   // Collect the scalar values of the vectorizable expression. We will use this
5795   // context to determine which values can be demoted. If we see a truncation,
5796   // we mark it as seeding another demotion.
5797   for (auto &EntryPtr : VectorizableTree)
5798     Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end());
5799 
5800   // Ensure the roots of the vectorizable tree don't form a cycle. They must
5801   // have a single external user that is not in the vectorizable tree.
5802   for (auto *Root : TreeRoot)
5803     if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
5804       return;
5805 
5806   // Conservatively determine if we can actually truncate the roots of the
5807   // expression. Collect the values that can be demoted in ToDemote and
5808   // additional roots that require investigating in Roots.
5809   SmallVector<Value *, 32> ToDemote;
5810   SmallVector<Value *, 4> Roots;
5811   for (auto *Root : TreeRoot)
5812     if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
5813       return;
5814 
5815   // The maximum bit width required to represent all the values that can be
5816   // demoted without loss of precision. It would be safe to truncate the roots
5817   // of the expression to this width.
5818   auto MaxBitWidth = 8u;
5819 
5820   // We first check if all the bits of the roots are demanded. If they're not,
5821   // we can truncate the roots to this narrower type.
5822   for (auto *Root : TreeRoot) {
5823     auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
5824     MaxBitWidth = std::max<unsigned>(
5825         Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
5826   }
5827 
5828   // True if the roots can be zero-extended back to their original type, rather
5829   // than sign-extended. We know that if the leading bits are not demanded, we
5830   // can safely zero-extend. So we initialize IsKnownPositive to True.
5831   bool IsKnownPositive = true;
5832 
5833   // If all the bits of the roots are demanded, we can try a little harder to
5834   // compute a narrower type. This can happen, for example, if the roots are
5835   // getelementptr indices. InstCombine promotes these indices to the pointer
5836   // width. Thus, all their bits are technically demanded even though the
5837   // address computation might be vectorized in a smaller type.
5838   //
5839   // We start by looking at each entry that can be demoted. We compute the
5840   // maximum bit width required to store the scalar by using ValueTracking to
5841   // compute the number of high-order bits we can truncate.
5842   if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
5843       llvm::all_of(TreeRoot, [](Value *R) {
5844         assert(R->hasOneUse() && "Root should have only one use!");
5845         return isa<GetElementPtrInst>(R->user_back());
5846       })) {
5847     MaxBitWidth = 8u;
5848 
5849     // Determine if the sign bit of all the roots is known to be zero. If not,
5850     // IsKnownPositive is set to False.
5851     IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
5852       KnownBits Known = computeKnownBits(R, *DL);
5853       return Known.isNonNegative();
5854     });
5855 
5856     // Determine the maximum number of bits required to store the scalar
5857     // values.
5858     for (auto *Scalar : ToDemote) {
5859       auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
5860       auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
5861       MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
5862     }
5863 
5864     // If we can't prove that the sign bit is zero, we must add one to the
5865     // maximum bit width to account for the unknown sign bit. This preserves
5866     // the existing sign bit so we can safely sign-extend the root back to the
5867     // original type. Otherwise, if we know the sign bit is zero, we will
5868     // zero-extend the root instead.
5869     //
5870     // FIXME: This is somewhat suboptimal, as there will be cases where adding
5871     //        one to the maximum bit width will yield a larger-than-necessary
5872     //        type. In general, we need to add an extra bit only if we can't
5873     //        prove that the upper bit of the original type is equal to the
5874     //        upper bit of the proposed smaller type. If these two bits are the
5875     //        same (either zero or one) we know that sign-extending from the
5876     //        smaller type will result in the same value. Here, since we can't
5877     //        yet prove this, we are just making the proposed smaller type
5878     //        larger to ensure correctness.
5879     if (!IsKnownPositive)
5880       ++MaxBitWidth;
5881   }
5882 
5883   // Round MaxBitWidth up to the next power-of-two.
5884   if (!isPowerOf2_64(MaxBitWidth))
5885     MaxBitWidth = NextPowerOf2(MaxBitWidth);
5886 
5887   // If the maximum bit width we compute is less than the with of the roots'
5888   // type, we can proceed with the narrowing. Otherwise, do nothing.
5889   if (MaxBitWidth >= TreeRootIT->getBitWidth())
5890     return;
5891 
5892   // If we can truncate the root, we must collect additional values that might
5893   // be demoted as a result. That is, those seeded by truncations we will
5894   // modify.
5895   while (!Roots.empty())
5896     collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
5897 
5898   // Finally, map the values we can demote to the maximum bit with we computed.
5899   for (auto *Scalar : ToDemote)
5900     MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
5901 }
5902 
5903 namespace {
5904 
5905 /// The SLPVectorizer Pass.
5906 struct SLPVectorizer : public FunctionPass {
5907   SLPVectorizerPass Impl;
5908 
5909   /// Pass identification, replacement for typeid
5910   static char ID;
5911 
5912   explicit SLPVectorizer() : FunctionPass(ID) {
5913     initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
5914   }
5915 
5916   bool doInitialization(Module &M) override {
5917     return false;
5918   }
5919 
5920   bool runOnFunction(Function &F) override {
5921     if (skipFunction(F))
5922       return false;
5923 
5924     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
5925     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
5926     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
5927     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
5928     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
5929     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
5930     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
5931     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
5932     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
5933     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
5934 
5935     return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
5936   }
5937 
5938   void getAnalysisUsage(AnalysisUsage &AU) const override {
5939     FunctionPass::getAnalysisUsage(AU);
5940     AU.addRequired<AssumptionCacheTracker>();
5941     AU.addRequired<ScalarEvolutionWrapperPass>();
5942     AU.addRequired<AAResultsWrapperPass>();
5943     AU.addRequired<TargetTransformInfoWrapperPass>();
5944     AU.addRequired<LoopInfoWrapperPass>();
5945     AU.addRequired<DominatorTreeWrapperPass>();
5946     AU.addRequired<DemandedBitsWrapperPass>();
5947     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
5948     AU.addRequired<InjectTLIMappingsLegacy>();
5949     AU.addPreserved<LoopInfoWrapperPass>();
5950     AU.addPreserved<DominatorTreeWrapperPass>();
5951     AU.addPreserved<AAResultsWrapperPass>();
5952     AU.addPreserved<GlobalsAAWrapperPass>();
5953     AU.setPreservesCFG();
5954   }
5955 };
5956 
5957 } // end anonymous namespace
5958 
5959 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
5960   auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
5961   auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
5962   auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
5963   auto *AA = &AM.getResult<AAManager>(F);
5964   auto *LI = &AM.getResult<LoopAnalysis>(F);
5965   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
5966   auto *AC = &AM.getResult<AssumptionAnalysis>(F);
5967   auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
5968   auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
5969 
5970   bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
5971   if (!Changed)
5972     return PreservedAnalyses::all();
5973 
5974   PreservedAnalyses PA;
5975   PA.preserveSet<CFGAnalyses>();
5976   PA.preserve<AAManager>();
5977   PA.preserve<GlobalsAA>();
5978   return PA;
5979 }
5980 
5981 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
5982                                 TargetTransformInfo *TTI_,
5983                                 TargetLibraryInfo *TLI_, AAResults *AA_,
5984                                 LoopInfo *LI_, DominatorTree *DT_,
5985                                 AssumptionCache *AC_, DemandedBits *DB_,
5986                                 OptimizationRemarkEmitter *ORE_) {
5987   if (!RunSLPVectorization)
5988     return false;
5989   SE = SE_;
5990   TTI = TTI_;
5991   TLI = TLI_;
5992   AA = AA_;
5993   LI = LI_;
5994   DT = DT_;
5995   AC = AC_;
5996   DB = DB_;
5997   DL = &F.getParent()->getDataLayout();
5998 
5999   Stores.clear();
6000   GEPs.clear();
6001   bool Changed = false;
6002 
6003   // If the target claims to have no vector registers don't attempt
6004   // vectorization.
6005   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)))
6006     return false;
6007 
6008   // Don't vectorize when the attribute NoImplicitFloat is used.
6009   if (F.hasFnAttribute(Attribute::NoImplicitFloat))
6010     return false;
6011 
6012   LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
6013 
6014   // Use the bottom up slp vectorizer to construct chains that start with
6015   // store instructions.
6016   BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);
6017 
6018   // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
6019   // delete instructions.
6020 
6021   // Scan the blocks in the function in post order.
6022   for (auto BB : post_order(&F.getEntryBlock())) {
6023     collectSeedInstructions(BB);
6024 
6025     // Vectorize trees that end at stores.
6026     if (!Stores.empty()) {
6027       LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
6028                         << " underlying objects.\n");
6029       Changed |= vectorizeStoreChains(R);
6030     }
6031 
6032     // Vectorize trees that end at reductions.
6033     Changed |= vectorizeChainsInBlock(BB, R);
6034 
6035     // Vectorize the index computations of getelementptr instructions. This
6036     // is primarily intended to catch gather-like idioms ending at
6037     // non-consecutive loads.
6038     if (!GEPs.empty()) {
6039       LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
6040                         << " underlying objects.\n");
6041       Changed |= vectorizeGEPIndices(BB, R);
6042     }
6043   }
6044 
6045   if (Changed) {
6046     R.optimizeGatherSequence();
6047     LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
6048   }
6049   return Changed;
6050 }
6051 
6052 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
6053                                             unsigned Idx) {
6054   LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size()
6055                     << "\n");
6056   const unsigned Sz = R.getVectorElementSize(Chain[0]);
6057   const unsigned MinVF = R.getMinVecRegSize() / Sz;
6058   unsigned VF = Chain.size();
6059 
6060   if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF)
6061     return false;
6062 
6063   LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx
6064                     << "\n");
6065 
6066   R.buildTree(Chain);
6067   Optional<ArrayRef<unsigned>> Order = R.bestOrder();
6068   // TODO: Handle orders of size less than number of elements in the vector.
6069   if (Order && Order->size() == Chain.size()) {
6070     // TODO: reorder tree nodes without tree rebuilding.
6071     SmallVector<Value *, 4> ReorderedOps(Chain.rbegin(), Chain.rend());
6072     llvm::transform(*Order, ReorderedOps.begin(),
6073                     [Chain](const unsigned Idx) { return Chain[Idx]; });
6074     R.buildTree(ReorderedOps);
6075   }
6076   if (R.isTreeTinyAndNotFullyVectorizable())
6077     return false;
6078   if (R.isLoadCombineCandidate())
6079     return false;
6080 
6081   R.computeMinimumValueSizes();
6082 
6083   InstructionCost Cost = R.getTreeCost();
6084 
6085   LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF =" << VF << "\n");
6086   if (Cost < -SLPCostThreshold) {
6087     LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n");
6088 
6089     using namespace ore;
6090 
6091     R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
6092                                         cast<StoreInst>(Chain[0]))
6093                      << "Stores SLP vectorized with cost " << NV("Cost", Cost)
6094                      << " and with tree size "
6095                      << NV("TreeSize", R.getTreeSize()));
6096 
6097     R.vectorizeTree();
6098     return true;
6099   }
6100 
6101   return false;
6102 }
6103 
6104 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
6105                                         BoUpSLP &R) {
6106   // We may run into multiple chains that merge into a single chain. We mark the
6107   // stores that we vectorized so that we don't visit the same store twice.
6108   BoUpSLP::ValueSet VectorizedStores;
6109   bool Changed = false;
6110 
6111   int E = Stores.size();
6112   SmallBitVector Tails(E, false);
6113   int MaxIter = MaxStoreLookup.getValue();
6114   SmallVector<std::pair<int, int>, 16> ConsecutiveChain(
6115       E, std::make_pair(E, INT_MAX));
6116   SmallVector<SmallBitVector, 4> CheckedPairs(E, SmallBitVector(E, false));
6117   int IterCnt;
6118   auto &&FindConsecutiveAccess = [this, &Stores, &Tails, &IterCnt, MaxIter,
6119                                   &CheckedPairs,
6120                                   &ConsecutiveChain](int K, int Idx) {
6121     if (IterCnt >= MaxIter)
6122       return true;
6123     if (CheckedPairs[Idx].test(K))
6124       return ConsecutiveChain[K].second == 1 &&
6125              ConsecutiveChain[K].first == Idx;
6126     ++IterCnt;
6127     CheckedPairs[Idx].set(K);
6128     CheckedPairs[K].set(Idx);
6129     Optional<int> Diff = getPointersDiff(Stores[K]->getPointerOperand(),
6130                                          Stores[Idx]->getPointerOperand(), *DL,
6131                                          *SE, /*StrictCheck=*/true);
6132     if (!Diff || *Diff == 0)
6133       return false;
6134     int Val = *Diff;
6135     if (Val < 0) {
6136       if (ConsecutiveChain[Idx].second > -Val) {
6137         Tails.set(K);
6138         ConsecutiveChain[Idx] = std::make_pair(K, -Val);
6139       }
6140       return false;
6141     }
6142     if (ConsecutiveChain[K].second <= Val)
6143       return false;
6144 
6145     Tails.set(Idx);
6146     ConsecutiveChain[K] = std::make_pair(Idx, Val);
6147     return Val == 1;
6148   };
6149   // Do a quadratic search on all of the given stores in reverse order and find
6150   // all of the pairs of stores that follow each other.
6151   for (int Idx = E - 1; Idx >= 0; --Idx) {
6152     // If a store has multiple consecutive store candidates, search according
6153     // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ...
6154     // This is because usually pairing with immediate succeeding or preceding
6155     // candidate create the best chance to find slp vectorization opportunity.
6156     const int MaxLookDepth = std::max(E - Idx, Idx + 1);
6157     IterCnt = 0;
6158     for (int Offset = 1, F = MaxLookDepth; Offset < F; ++Offset)
6159       if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) ||
6160           (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx)))
6161         break;
6162   }
6163 
6164   // For stores that start but don't end a link in the chain:
6165   for (int Cnt = E; Cnt > 0; --Cnt) {
6166     int I = Cnt - 1;
6167     if (ConsecutiveChain[I].first == E || Tails.test(I))
6168       continue;
6169     // We found a store instr that starts a chain. Now follow the chain and try
6170     // to vectorize it.
6171     BoUpSLP::ValueList Operands;
6172     // Collect the chain into a list.
6173     while (I != E && !VectorizedStores.count(Stores[I])) {
6174       Operands.push_back(Stores[I]);
6175       Tails.set(I);
6176       if (ConsecutiveChain[I].second != 1) {
6177         // Mark the new end in the chain and go back, if required. It might be
6178         // required if the original stores come in reversed order, for example.
6179         if (ConsecutiveChain[I].first != E &&
6180             Tails.test(ConsecutiveChain[I].first) &&
6181             !VectorizedStores.count(Stores[ConsecutiveChain[I].first])) {
6182           Tails.reset(ConsecutiveChain[I].first);
6183           if (Cnt < ConsecutiveChain[I].first + 2)
6184             Cnt = ConsecutiveChain[I].first + 2;
6185         }
6186         break;
6187       }
6188       // Move to the next value in the chain.
6189       I = ConsecutiveChain[I].first;
6190     }
6191     assert(!Operands.empty() && "Expected non-empty list of stores.");
6192 
6193     unsigned MaxVecRegSize = R.getMaxVecRegSize();
6194     unsigned EltSize = R.getVectorElementSize(Operands[0]);
6195     unsigned MaxElts = llvm::PowerOf2Floor(MaxVecRegSize / EltSize);
6196 
6197     unsigned MinVF = std::max(2U, R.getMinVecRegSize() / EltSize);
6198     unsigned MaxVF = std::min(R.getMaximumVF(EltSize, Instruction::Store),
6199                               MaxElts);
6200 
6201     // FIXME: Is division-by-2 the correct step? Should we assert that the
6202     // register size is a power-of-2?
6203     unsigned StartIdx = 0;
6204     for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) {
6205       for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) {
6206         ArrayRef<Value *> Slice = makeArrayRef(Operands).slice(Cnt, Size);
6207         if (!VectorizedStores.count(Slice.front()) &&
6208             !VectorizedStores.count(Slice.back()) &&
6209             vectorizeStoreChain(Slice, R, Cnt)) {
6210           // Mark the vectorized stores so that we don't vectorize them again.
6211           VectorizedStores.insert(Slice.begin(), Slice.end());
6212           Changed = true;
6213           // If we vectorized initial block, no need to try to vectorize it
6214           // again.
6215           if (Cnt == StartIdx)
6216             StartIdx += Size;
6217           Cnt += Size;
6218           continue;
6219         }
6220         ++Cnt;
6221       }
6222       // Check if the whole array was vectorized already - exit.
6223       if (StartIdx >= Operands.size())
6224         break;
6225     }
6226   }
6227 
6228   return Changed;
6229 }
6230 
6231 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
6232   // Initialize the collections. We will make a single pass over the block.
6233   Stores.clear();
6234   GEPs.clear();
6235 
6236   // Visit the store and getelementptr instructions in BB and organize them in
6237   // Stores and GEPs according to the underlying objects of their pointer
6238   // operands.
6239   for (Instruction &I : *BB) {
6240     // Ignore store instructions that are volatile or have a pointer operand
6241     // that doesn't point to a scalar type.
6242     if (auto *SI = dyn_cast<StoreInst>(&I)) {
6243       if (!SI->isSimple())
6244         continue;
6245       if (!isValidElementType(SI->getValueOperand()->getType()))
6246         continue;
6247       Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI);
6248     }
6249 
6250     // Ignore getelementptr instructions that have more than one index, a
6251     // constant index, or a pointer operand that doesn't point to a scalar
6252     // type.
6253     else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
6254       auto Idx = GEP->idx_begin()->get();
6255       if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
6256         continue;
6257       if (!isValidElementType(Idx->getType()))
6258         continue;
6259       if (GEP->getType()->isVectorTy())
6260         continue;
6261       GEPs[GEP->getPointerOperand()].push_back(GEP);
6262     }
6263   }
6264 }
6265 
6266 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
6267   if (!A || !B)
6268     return false;
6269   Value *VL[] = {A, B};
6270   return tryToVectorizeList(VL, R, /*AllowReorder=*/true);
6271 }
6272 
6273 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
6274                                            bool AllowReorder,
6275                                            ArrayRef<Value *> InsertUses) {
6276   if (VL.size() < 2)
6277     return false;
6278 
6279   LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = "
6280                     << VL.size() << ".\n");
6281 
6282   // Check that all of the parts are instructions of the same type,
6283   // we permit an alternate opcode via InstructionsState.
6284   InstructionsState S = getSameOpcode(VL);
6285   if (!S.getOpcode())
6286     return false;
6287 
6288   Instruction *I0 = cast<Instruction>(S.OpValue);
6289   // Make sure invalid types (including vector type) are rejected before
6290   // determining vectorization factor for scalar instructions.
6291   for (Value *V : VL) {
6292     Type *Ty = V->getType();
6293     if (!isValidElementType(Ty)) {
6294       // NOTE: the following will give user internal llvm type name, which may
6295       // not be useful.
6296       R.getORE()->emit([&]() {
6297         std::string type_str;
6298         llvm::raw_string_ostream rso(type_str);
6299         Ty->print(rso);
6300         return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0)
6301                << "Cannot SLP vectorize list: type "
6302                << rso.str() + " is unsupported by vectorizer";
6303       });
6304       return false;
6305     }
6306   }
6307 
6308   unsigned Sz = R.getVectorElementSize(I0);
6309   unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz);
6310   unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
6311   MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF);
6312   if (MaxVF < 2) {
6313     R.getORE()->emit([&]() {
6314       return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0)
6315              << "Cannot SLP vectorize list: vectorization factor "
6316              << "less than 2 is not supported";
6317     });
6318     return false;
6319   }
6320 
6321   bool Changed = false;
6322   bool CandidateFound = false;
6323   InstructionCost MinCost = SLPCostThreshold.getValue();
6324 
6325   bool CompensateUseCost =
6326       !InsertUses.empty() && llvm::all_of(InsertUses, [](const Value *V) {
6327         return V && isa<InsertElementInst>(V);
6328       });
6329   assert((!CompensateUseCost || InsertUses.size() == VL.size()) &&
6330          "Each scalar expected to have an associated InsertElement user.");
6331 
6332   unsigned NextInst = 0, MaxInst = VL.size();
6333   for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) {
6334     // No actual vectorization should happen, if number of parts is the same as
6335     // provided vectorization factor (i.e. the scalar type is used for vector
6336     // code during codegen).
6337     auto *VecTy = FixedVectorType::get(VL[0]->getType(), VF);
6338     if (TTI->getNumberOfParts(VecTy) == VF)
6339       continue;
6340     for (unsigned I = NextInst; I < MaxInst; ++I) {
6341       unsigned OpsWidth = 0;
6342 
6343       if (I + VF > MaxInst)
6344         OpsWidth = MaxInst - I;
6345       else
6346         OpsWidth = VF;
6347 
6348       if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
6349         break;
6350 
6351       ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
6352       // Check that a previous iteration of this loop did not delete the Value.
6353       if (llvm::any_of(Ops, [&R](Value *V) {
6354             auto *I = dyn_cast<Instruction>(V);
6355             return I && R.isDeleted(I);
6356           }))
6357         continue;
6358 
6359       LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
6360                         << "\n");
6361 
6362       R.buildTree(Ops);
6363       Optional<ArrayRef<unsigned>> Order = R.bestOrder();
6364       // TODO: check if we can allow reordering for more cases.
6365       if (AllowReorder && Order) {
6366         // TODO: reorder tree nodes without tree rebuilding.
6367         // Conceptually, there is nothing actually preventing us from trying to
6368         // reorder a larger list. In fact, we do exactly this when vectorizing
6369         // reductions. However, at this point, we only expect to get here when
6370         // there are exactly two operations.
6371         assert(Ops.size() == 2);
6372         Value *ReorderedOps[] = {Ops[1], Ops[0]};
6373         R.buildTree(ReorderedOps, None);
6374       }
6375       if (R.isTreeTinyAndNotFullyVectorizable())
6376         continue;
6377 
6378       R.computeMinimumValueSizes();
6379       InstructionCost Cost = R.getTreeCost();
6380       CandidateFound = true;
6381       if (CompensateUseCost) {
6382         // TODO: Use TTI's getScalarizationOverhead for sequence of inserts
6383         // rather than sum of single inserts as the latter may overestimate
6384         // cost. This work should imply improving cost estimation for extracts
6385         // that added in for external (for vectorization tree) users,i.e. that
6386         // part should also switch to same interface.
6387         // For example, the following case is projected code after SLP:
6388         //  %4 = extractelement <4 x i64> %3, i32 0
6389         //  %v0 = insertelement <4 x i64> poison, i64 %4, i32 0
6390         //  %5 = extractelement <4 x i64> %3, i32 1
6391         //  %v1 = insertelement <4 x i64> %v0, i64 %5, i32 1
6392         //  %6 = extractelement <4 x i64> %3, i32 2
6393         //  %v2 = insertelement <4 x i64> %v1, i64 %6, i32 2
6394         //  %7 = extractelement <4 x i64> %3, i32 3
6395         //  %v3 = insertelement <4 x i64> %v2, i64 %7, i32 3
6396         //
6397         // Extracts here added by SLP in order to feed users (the inserts) of
6398         // original scalars and contribute to "ExtractCost" at cost evaluation.
6399         // The inserts in turn form sequence to build an aggregate that
6400         // detected by findBuildAggregate routine.
6401         // SLP makes an assumption that such sequence will be optimized away
6402         // later (instcombine) so it tries to compensate ExctractCost with
6403         // cost of insert sequence.
6404         // Current per element cost calculation approach is not quite accurate
6405         // and tends to create bias toward favoring vectorization.
6406         // Switching to the TTI interface might help a bit.
6407         // Alternative solution could be pattern-match to detect a no-op or
6408         // shuffle.
6409         InstructionCost UserCost = 0;
6410         for (unsigned Lane = 0; Lane < OpsWidth; Lane++) {
6411           auto *IE = cast<InsertElementInst>(InsertUses[I + Lane]);
6412           if (auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2)))
6413             UserCost += TTI->getVectorInstrCost(
6414                 Instruction::InsertElement, IE->getType(), CI->getZExtValue());
6415         }
6416         LLVM_DEBUG(dbgs() << "SLP: Compensate cost of users by: " << UserCost
6417                           << ".\n");
6418         Cost -= UserCost;
6419       }
6420 
6421       MinCost = std::min(MinCost, Cost);
6422 
6423       if (Cost < -SLPCostThreshold) {
6424         LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
6425         R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
6426                                                     cast<Instruction>(Ops[0]))
6427                                  << "SLP vectorized with cost " << ore::NV("Cost", Cost)
6428                                  << " and with tree size "
6429                                  << ore::NV("TreeSize", R.getTreeSize()));
6430 
6431         R.vectorizeTree();
6432         // Move to the next bundle.
6433         I += VF - 1;
6434         NextInst = I + 1;
6435         Changed = true;
6436       }
6437     }
6438   }
6439 
6440   if (!Changed && CandidateFound) {
6441     R.getORE()->emit([&]() {
6442       return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0)
6443              << "List vectorization was possible but not beneficial with cost "
6444              << ore::NV("Cost", MinCost) << " >= "
6445              << ore::NV("Treshold", -SLPCostThreshold);
6446     });
6447   } else if (!Changed) {
6448     R.getORE()->emit([&]() {
6449       return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0)
6450              << "Cannot SLP vectorize list: vectorization was impossible"
6451              << " with available vectorization factors";
6452     });
6453   }
6454   return Changed;
6455 }
6456 
6457 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
6458   if (!I)
6459     return false;
6460 
6461   if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I))
6462     return false;
6463 
6464   Value *P = I->getParent();
6465 
6466   // Vectorize in current basic block only.
6467   auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
6468   auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
6469   if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
6470     return false;
6471 
6472   // Try to vectorize V.
6473   if (tryToVectorizePair(Op0, Op1, R))
6474     return true;
6475 
6476   auto *A = dyn_cast<BinaryOperator>(Op0);
6477   auto *B = dyn_cast<BinaryOperator>(Op1);
6478   // Try to skip B.
6479   if (B && B->hasOneUse()) {
6480     auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
6481     auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
6482     if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R))
6483       return true;
6484     if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R))
6485       return true;
6486   }
6487 
6488   // Try to skip A.
6489   if (A && A->hasOneUse()) {
6490     auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
6491     auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
6492     if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R))
6493       return true;
6494     if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R))
6495       return true;
6496   }
6497   return false;
6498 }
6499 
6500 namespace {
6501 
6502 /// Model horizontal reductions.
6503 ///
6504 /// A horizontal reduction is a tree of reduction instructions that has values
6505 /// that can be put into a vector as its leaves. For example:
6506 ///
6507 /// mul mul mul mul
6508 ///  \  /    \  /
6509 ///   +       +
6510 ///    \     /
6511 ///       +
6512 /// This tree has "mul" as its leaf values and "+" as its reduction
6513 /// instructions. A reduction can feed into a store or a binary operation
6514 /// feeding a phi.
6515 ///    ...
6516 ///    \  /
6517 ///     +
6518 ///     |
6519 ///  phi +=
6520 ///
6521 ///  Or:
6522 ///    ...
6523 ///    \  /
6524 ///     +
6525 ///     |
6526 ///   *p =
6527 ///
6528 class HorizontalReduction {
6529   using ReductionOpsType = SmallVector<Value *, 16>;
6530   using ReductionOpsListType = SmallVector<ReductionOpsType, 2>;
6531   ReductionOpsListType ReductionOps;
6532   SmallVector<Value *, 32> ReducedVals;
6533   // Use map vector to make stable output.
6534   MapVector<Instruction *, Value *> ExtraArgs;
6535   WeakTrackingVH ReductionRoot;
6536   /// The type of reduction operation.
6537   RecurKind RdxKind;
6538 
6539   /// Checks if instruction is associative and can be vectorized.
6540   static bool isVectorizable(RecurKind Kind, Instruction *I) {
6541     if (Kind == RecurKind::None)
6542       return false;
6543     if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind))
6544       return true;
6545 
6546     if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) {
6547       // FP min/max are associative except for NaN and -0.0. We do not
6548       // have to rule out -0.0 here because the intrinsic semantics do not
6549       // specify a fixed result for it.
6550       return I->getFastMathFlags().noNaNs();
6551     }
6552 
6553     return I->isAssociative();
6554   }
6555 
6556   /// Checks if the ParentStackElem.first should be marked as a reduction
6557   /// operation with an extra argument or as extra argument itself.
6558   void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem,
6559                     Value *ExtraArg) {
6560     if (ExtraArgs.count(ParentStackElem.first)) {
6561       ExtraArgs[ParentStackElem.first] = nullptr;
6562       // We ran into something like:
6563       // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg.
6564       // The whole ParentStackElem.first should be considered as an extra value
6565       // in this case.
6566       // Do not perform analysis of remaining operands of ParentStackElem.first
6567       // instruction, this whole instruction is an extra argument.
6568       ParentStackElem.second = getNumberOfOperands(ParentStackElem.first);
6569     } else {
6570       // We ran into something like:
6571       // ParentStackElem.first += ... + ExtraArg + ...
6572       ExtraArgs[ParentStackElem.first] = ExtraArg;
6573     }
6574   }
6575 
6576   /// Creates reduction operation with the current opcode.
6577   static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS,
6578                          Value *RHS, const Twine &Name, bool UseSelect) {
6579     unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind);
6580     switch (Kind) {
6581     case RecurKind::Add:
6582     case RecurKind::Mul:
6583     case RecurKind::Or:
6584     case RecurKind::And:
6585     case RecurKind::Xor:
6586     case RecurKind::FAdd:
6587     case RecurKind::FMul:
6588       return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
6589                                  Name);
6590     case RecurKind::FMax:
6591       return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS);
6592     case RecurKind::FMin:
6593       return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS);
6594     case RecurKind::SMax:
6595       if (UseSelect) {
6596         Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name);
6597         return Builder.CreateSelect(Cmp, LHS, RHS, Name);
6598       }
6599       return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS);
6600     case RecurKind::SMin:
6601       if (UseSelect) {
6602         Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name);
6603         return Builder.CreateSelect(Cmp, LHS, RHS, Name);
6604       }
6605       return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS);
6606     case RecurKind::UMax:
6607       if (UseSelect) {
6608         Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name);
6609         return Builder.CreateSelect(Cmp, LHS, RHS, Name);
6610       }
6611       return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS);
6612     case RecurKind::UMin:
6613       if (UseSelect) {
6614         Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name);
6615         return Builder.CreateSelect(Cmp, LHS, RHS, Name);
6616       }
6617       return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS);
6618     default:
6619       llvm_unreachable("Unknown reduction operation.");
6620     }
6621   }
6622 
6623   /// Creates reduction operation with the current opcode with the IR flags
6624   /// from \p ReductionOps.
6625   static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS,
6626                          Value *RHS, const Twine &Name,
6627                          const ReductionOpsListType &ReductionOps) {
6628     bool UseSelect = ReductionOps.size() == 2;
6629     assert((!UseSelect || isa<SelectInst>(ReductionOps[1][0])) &&
6630            "Expected cmp + select pairs for reduction");
6631     Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect);
6632     if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) {
6633       if (auto *Sel = dyn_cast<SelectInst>(Op)) {
6634         propagateIRFlags(Sel->getCondition(), ReductionOps[0]);
6635         propagateIRFlags(Op, ReductionOps[1]);
6636         return Op;
6637       }
6638     }
6639     propagateIRFlags(Op, ReductionOps[0]);
6640     return Op;
6641   }
6642   /// Creates reduction operation with the current opcode with the IR flags
6643   /// from \p I.
6644   static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS,
6645                          Value *RHS, const Twine &Name, Instruction *I) {
6646     auto *SelI = dyn_cast<SelectInst>(I);
6647     Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, SelI != nullptr);
6648     if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) {
6649       if (auto *Sel = dyn_cast<SelectInst>(Op))
6650           propagateIRFlags(Sel->getCondition(), SelI->getCondition());
6651     }
6652     propagateIRFlags(Op, I);
6653     return Op;
6654   }
6655 
6656   static RecurKind getRdxKind(Instruction *I) {
6657     assert(I && "Expected instruction for reduction matching");
6658     TargetTransformInfo::ReductionFlags RdxFlags;
6659     if (match(I, m_Add(m_Value(), m_Value())))
6660       return RecurKind::Add;
6661     if (match(I, m_Mul(m_Value(), m_Value())))
6662       return RecurKind::Mul;
6663     if (match(I, m_And(m_Value(), m_Value())))
6664       return RecurKind::And;
6665     if (match(I, m_Or(m_Value(), m_Value())))
6666       return RecurKind::Or;
6667     if (match(I, m_Xor(m_Value(), m_Value())))
6668       return RecurKind::Xor;
6669     if (match(I, m_FAdd(m_Value(), m_Value())))
6670       return RecurKind::FAdd;
6671     if (match(I, m_FMul(m_Value(), m_Value())))
6672       return RecurKind::FMul;
6673 
6674     if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value())))
6675       return RecurKind::FMax;
6676     if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
6677       return RecurKind::FMin;
6678 
6679     // This matches either cmp+select or intrinsics. SLP is expected to handle
6680     // either form.
6681     // TODO: If we are canonicalizing to intrinsics, we can remove several
6682     //       special-case paths that deal with selects.
6683     if (match(I, m_SMax(m_Value(), m_Value())))
6684       return RecurKind::SMax;
6685     if (match(I, m_SMin(m_Value(), m_Value())))
6686       return RecurKind::SMin;
6687     if (match(I, m_UMax(m_Value(), m_Value())))
6688       return RecurKind::UMax;
6689     if (match(I, m_UMin(m_Value(), m_Value())))
6690       return RecurKind::UMin;
6691 
6692     if (auto *Select = dyn_cast<SelectInst>(I)) {
6693       // Try harder: look for min/max pattern based on instructions producing
6694       // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2).
6695       // During the intermediate stages of SLP, it's very common to have
6696       // pattern like this (since optimizeGatherSequence is run only once
6697       // at the end):
6698       // %1 = extractelement <2 x i32> %a, i32 0
6699       // %2 = extractelement <2 x i32> %a, i32 1
6700       // %cond = icmp sgt i32 %1, %2
6701       // %3 = extractelement <2 x i32> %a, i32 0
6702       // %4 = extractelement <2 x i32> %a, i32 1
6703       // %select = select i1 %cond, i32 %3, i32 %4
6704       CmpInst::Predicate Pred;
6705       Instruction *L1;
6706       Instruction *L2;
6707 
6708       Value *LHS = Select->getTrueValue();
6709       Value *RHS = Select->getFalseValue();
6710       Value *Cond = Select->getCondition();
6711 
6712       // TODO: Support inverse predicates.
6713       if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) {
6714         if (!isa<ExtractElementInst>(RHS) ||
6715             !L2->isIdenticalTo(cast<Instruction>(RHS)))
6716           return RecurKind::None;
6717       } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) {
6718         if (!isa<ExtractElementInst>(LHS) ||
6719             !L1->isIdenticalTo(cast<Instruction>(LHS)))
6720           return RecurKind::None;
6721       } else {
6722         if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS))
6723           return RecurKind::None;
6724         if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) ||
6725             !L1->isIdenticalTo(cast<Instruction>(LHS)) ||
6726             !L2->isIdenticalTo(cast<Instruction>(RHS)))
6727           return RecurKind::None;
6728       }
6729 
6730       TargetTransformInfo::ReductionFlags RdxFlags;
6731       switch (Pred) {
6732       default:
6733         return RecurKind::None;
6734       case CmpInst::ICMP_SGT:
6735       case CmpInst::ICMP_SGE:
6736         return RecurKind::SMax;
6737       case CmpInst::ICMP_SLT:
6738       case CmpInst::ICMP_SLE:
6739         return RecurKind::SMin;
6740       case CmpInst::ICMP_UGT:
6741       case CmpInst::ICMP_UGE:
6742         return RecurKind::UMax;
6743       case CmpInst::ICMP_ULT:
6744       case CmpInst::ICMP_ULE:
6745         return RecurKind::UMin;
6746       }
6747     }
6748     return RecurKind::None;
6749   }
6750 
6751   /// Get the index of the first operand.
6752   static unsigned getFirstOperandIndex(Instruction *I) {
6753     return isa<SelectInst>(I) ? 1 : 0;
6754   }
6755 
6756   /// Total number of operands in the reduction operation.
6757   static unsigned getNumberOfOperands(Instruction *I) {
6758     return isa<SelectInst>(I) ? 3 : 2;
6759   }
6760 
6761   /// Checks if the instruction is in basic block \p BB.
6762   /// For a min/max reduction check that both compare and select are in \p BB.
6763   static bool hasSameParent(Instruction *I, BasicBlock *BB, bool IsRedOp) {
6764     auto *Sel = dyn_cast<SelectInst>(I);
6765     if (IsRedOp && Sel) {
6766       auto *Cmp = cast<Instruction>(Sel->getCondition());
6767       return Sel->getParent() == BB && Cmp->getParent() == BB;
6768     }
6769     return I->getParent() == BB;
6770   }
6771 
6772   /// Expected number of uses for reduction operations/reduced values.
6773   static bool hasRequiredNumberOfUses(bool MatchCmpSel, Instruction *I) {
6774     // SelectInst must be used twice while the condition op must have single
6775     // use only.
6776     if (MatchCmpSel) {
6777       if (auto *Sel = dyn_cast<SelectInst>(I))
6778         return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse();
6779       return I->hasNUses(2);
6780     }
6781 
6782     // Arithmetic reduction operation must be used once only.
6783     return I->hasOneUse();
6784   }
6785 
6786   /// Initializes the list of reduction operations.
6787   void initReductionOps(Instruction *I) {
6788     if (isa<SelectInst>(I))
6789       ReductionOps.assign(2, ReductionOpsType());
6790     else
6791       ReductionOps.assign(1, ReductionOpsType());
6792   }
6793 
6794   /// Add all reduction operations for the reduction instruction \p I.
6795   void addReductionOps(Instruction *I) {
6796     if (auto *Sel = dyn_cast<SelectInst>(I)) {
6797       ReductionOps[0].emplace_back(Sel->getCondition());
6798       ReductionOps[1].emplace_back(Sel);
6799     } else {
6800       ReductionOps[0].emplace_back(I);
6801     }
6802   }
6803 
6804   static Value *getLHS(RecurKind Kind, Instruction *I) {
6805     if (Kind == RecurKind::None)
6806       return nullptr;
6807     return I->getOperand(getFirstOperandIndex(I));
6808   }
6809   static Value *getRHS(RecurKind Kind, Instruction *I) {
6810     if (Kind == RecurKind::None)
6811       return nullptr;
6812     return I->getOperand(getFirstOperandIndex(I) + 1);
6813   }
6814 
6815 public:
6816   HorizontalReduction() = default;
6817 
6818   /// Try to find a reduction tree.
6819   bool matchAssociativeReduction(PHINode *Phi, Instruction *B) {
6820     assert((!Phi || is_contained(Phi->operands(), B)) &&
6821            "Phi needs to use the binary operator");
6822 
6823     RdxKind = getRdxKind(B);
6824 
6825     // We could have a initial reductions that is not an add.
6826     //  r *= v1 + v2 + v3 + v4
6827     // In such a case start looking for a tree rooted in the first '+'.
6828     if (Phi) {
6829       if (getLHS(RdxKind, B) == Phi) {
6830         Phi = nullptr;
6831         B = dyn_cast<Instruction>(getRHS(RdxKind, B));
6832         if (!B)
6833           return false;
6834         RdxKind = getRdxKind(B);
6835       } else if (getRHS(RdxKind, B) == Phi) {
6836         Phi = nullptr;
6837         B = dyn_cast<Instruction>(getLHS(RdxKind, B));
6838         if (!B)
6839           return false;
6840         RdxKind = getRdxKind(B);
6841       }
6842     }
6843 
6844     if (!isVectorizable(RdxKind, B))
6845       return false;
6846 
6847     // Analyze "regular" integer/FP types for reductions - no target-specific
6848     // types or pointers.
6849     Type *Ty = B->getType();
6850     if (!isValidElementType(Ty) || Ty->isPointerTy())
6851       return false;
6852 
6853     ReductionRoot = B;
6854 
6855     // The opcode for leaf values that we perform a reduction on.
6856     // For example: load(x) + load(y) + load(z) + fptoui(w)
6857     // The leaf opcode for 'w' does not match, so we don't include it as a
6858     // potential candidate for the reduction.
6859     unsigned LeafOpcode = 0;
6860 
6861     // Post order traverse the reduction tree starting at B. We only handle true
6862     // trees containing only binary operators.
6863     SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
6864     Stack.push_back(std::make_pair(B, getFirstOperandIndex(B)));
6865     initReductionOps(B);
6866     while (!Stack.empty()) {
6867       Instruction *TreeN = Stack.back().first;
6868       unsigned EdgeToVisit = Stack.back().second++;
6869       const RecurKind TreeRdxKind = getRdxKind(TreeN);
6870       bool IsReducedValue = TreeRdxKind != RdxKind;
6871 
6872       // Postorder visit.
6873       if (IsReducedValue || EdgeToVisit == getNumberOfOperands(TreeN)) {
6874         if (IsReducedValue)
6875           ReducedVals.push_back(TreeN);
6876         else {
6877           auto ExtraArgsIter = ExtraArgs.find(TreeN);
6878           if (ExtraArgsIter != ExtraArgs.end() && !ExtraArgsIter->second) {
6879             // Check if TreeN is an extra argument of its parent operation.
6880             if (Stack.size() <= 1) {
6881               // TreeN can't be an extra argument as it is a root reduction
6882               // operation.
6883               return false;
6884             }
6885             // Yes, TreeN is an extra argument, do not add it to a list of
6886             // reduction operations.
6887             // Stack[Stack.size() - 2] always points to the parent operation.
6888             markExtraArg(Stack[Stack.size() - 2], TreeN);
6889             ExtraArgs.erase(TreeN);
6890           } else
6891             addReductionOps(TreeN);
6892         }
6893         // Retract.
6894         Stack.pop_back();
6895         continue;
6896       }
6897 
6898       // Visit left or right.
6899       Value *EdgeVal = TreeN->getOperand(EdgeToVisit);
6900       auto *EdgeInst = dyn_cast<Instruction>(EdgeVal);
6901       if (!EdgeInst) {
6902         // Edge value is not a reduction instruction or a leaf instruction.
6903         // (It may be a constant, function argument, or something else.)
6904         markExtraArg(Stack.back(), EdgeVal);
6905         continue;
6906       }
6907       RecurKind EdgeRdxKind = getRdxKind(EdgeInst);
6908       // Continue analysis if the next operand is a reduction operation or
6909       // (possibly) a leaf value. If the leaf value opcode is not set,
6910       // the first met operation != reduction operation is considered as the
6911       // leaf opcode.
6912       // Only handle trees in the current basic block.
6913       // Each tree node needs to have minimal number of users except for the
6914       // ultimate reduction.
6915       const bool IsRdxInst = EdgeRdxKind == RdxKind;
6916       if (EdgeInst != Phi && EdgeInst != B &&
6917           hasSameParent(EdgeInst, B->getParent(), IsRdxInst) &&
6918           hasRequiredNumberOfUses(isa<SelectInst>(B), EdgeInst) &&
6919           (!LeafOpcode || LeafOpcode == EdgeInst->getOpcode() || IsRdxInst)) {
6920         if (IsRdxInst) {
6921           // We need to be able to reassociate the reduction operations.
6922           if (!isVectorizable(EdgeRdxKind, EdgeInst)) {
6923             // I is an extra argument for TreeN (its parent operation).
6924             markExtraArg(Stack.back(), EdgeInst);
6925             continue;
6926           }
6927         } else if (!LeafOpcode) {
6928           LeafOpcode = EdgeInst->getOpcode();
6929         }
6930         Stack.push_back(
6931             std::make_pair(EdgeInst, getFirstOperandIndex(EdgeInst)));
6932         continue;
6933       }
6934       // I is an extra argument for TreeN (its parent operation).
6935       markExtraArg(Stack.back(), EdgeInst);
6936     }
6937     return true;
6938   }
6939 
6940   /// Attempt to vectorize the tree found by matchAssociativeReduction.
6941   bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
6942     // If there are a sufficient number of reduction values, reduce
6943     // to a nearby power-of-2. We can safely generate oversized
6944     // vectors and rely on the backend to split them to legal sizes.
6945     unsigned NumReducedVals = ReducedVals.size();
6946     if (NumReducedVals < 4)
6947       return false;
6948 
6949     // Intersect the fast-math-flags from all reduction operations.
6950     FastMathFlags RdxFMF;
6951     RdxFMF.set();
6952     for (ReductionOpsType &RdxOp : ReductionOps) {
6953       for (Value *RdxVal : RdxOp) {
6954         if (auto *FPMO = dyn_cast<FPMathOperator>(RdxVal))
6955           RdxFMF &= FPMO->getFastMathFlags();
6956       }
6957     }
6958 
6959     IRBuilder<> Builder(cast<Instruction>(ReductionRoot));
6960     Builder.setFastMathFlags(RdxFMF);
6961 
6962     BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
6963     // The same extra argument may be used several times, so log each attempt
6964     // to use it.
6965     for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) {
6966       assert(Pair.first && "DebugLoc must be set.");
6967       ExternallyUsedValues[Pair.second].push_back(Pair.first);
6968     }
6969 
6970     // The compare instruction of a min/max is the insertion point for new
6971     // instructions and may be replaced with a new compare instruction.
6972     auto getCmpForMinMaxReduction = [](Instruction *RdxRootInst) {
6973       assert(isa<SelectInst>(RdxRootInst) &&
6974              "Expected min/max reduction to have select root instruction");
6975       Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition();
6976       assert(isa<Instruction>(ScalarCond) &&
6977              "Expected min/max reduction to have compare condition");
6978       return cast<Instruction>(ScalarCond);
6979     };
6980 
6981     // The reduction root is used as the insertion point for new instructions,
6982     // so set it as externally used to prevent it from being deleted.
6983     ExternallyUsedValues[ReductionRoot];
6984     SmallVector<Value *, 16> IgnoreList;
6985     for (ReductionOpsType &RdxOp : ReductionOps)
6986       IgnoreList.append(RdxOp.begin(), RdxOp.end());
6987 
6988     unsigned ReduxWidth = PowerOf2Floor(NumReducedVals);
6989     if (NumReducedVals > ReduxWidth) {
6990       // In the loop below, we are building a tree based on a window of
6991       // 'ReduxWidth' values.
6992       // If the operands of those values have common traits (compare predicate,
6993       // constant operand, etc), then we want to group those together to
6994       // minimize the cost of the reduction.
6995 
6996       // TODO: This should be extended to count common operands for
6997       //       compares and binops.
6998 
6999       // Step 1: Count the number of times each compare predicate occurs.
7000       SmallDenseMap<unsigned, unsigned> PredCountMap;
7001       for (Value *RdxVal : ReducedVals) {
7002         CmpInst::Predicate Pred;
7003         if (match(RdxVal, m_Cmp(Pred, m_Value(), m_Value())))
7004           ++PredCountMap[Pred];
7005       }
7006       // Step 2: Sort the values so the most common predicates come first.
7007       stable_sort(ReducedVals, [&PredCountMap](Value *A, Value *B) {
7008         CmpInst::Predicate PredA, PredB;
7009         if (match(A, m_Cmp(PredA, m_Value(), m_Value())) &&
7010             match(B, m_Cmp(PredB, m_Value(), m_Value()))) {
7011           return PredCountMap[PredA] > PredCountMap[PredB];
7012         }
7013         return false;
7014       });
7015     }
7016 
7017     Value *VectorizedTree = nullptr;
7018     unsigned i = 0;
7019     while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) {
7020       ArrayRef<Value *> VL(&ReducedVals[i], ReduxWidth);
7021       V.buildTree(VL, ExternallyUsedValues, IgnoreList);
7022       Optional<ArrayRef<unsigned>> Order = V.bestOrder();
7023       if (Order) {
7024         assert(Order->size() == VL.size() &&
7025                "Order size must be the same as number of vectorized "
7026                "instructions.");
7027         // TODO: reorder tree nodes without tree rebuilding.
7028         SmallVector<Value *, 4> ReorderedOps(VL.size());
7029         llvm::transform(*Order, ReorderedOps.begin(),
7030                         [VL](const unsigned Idx) { return VL[Idx]; });
7031         V.buildTree(ReorderedOps, ExternallyUsedValues, IgnoreList);
7032       }
7033       if (V.isTreeTinyAndNotFullyVectorizable())
7034         break;
7035       if (V.isLoadCombineReductionCandidate(RdxKind))
7036         break;
7037 
7038       V.computeMinimumValueSizes();
7039 
7040       // Estimate cost.
7041       InstructionCost TreeCost = V.getTreeCost();
7042       InstructionCost ReductionCost =
7043           getReductionCost(TTI, ReducedVals[i], ReduxWidth);
7044       InstructionCost Cost = TreeCost + ReductionCost;
7045       if (!Cost.isValid()) {
7046         LLVM_DEBUG(dbgs() << "Encountered invalid baseline cost.\n");
7047         return false;
7048       }
7049       if (Cost >= -SLPCostThreshold) {
7050         V.getORE()->emit([&]() {
7051           return OptimizationRemarkMissed(SV_NAME, "HorSLPNotBeneficial",
7052                                           cast<Instruction>(VL[0]))
7053                  << "Vectorizing horizontal reduction is possible"
7054                  << "but not beneficial with cost " << ore::NV("Cost", Cost)
7055                  << " and threshold "
7056                  << ore::NV("Threshold", -SLPCostThreshold);
7057         });
7058         break;
7059       }
7060 
7061       LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
7062                         << Cost << ". (HorRdx)\n");
7063       V.getORE()->emit([&]() {
7064         return OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction",
7065                                   cast<Instruction>(VL[0]))
7066                << "Vectorized horizontal reduction with cost "
7067                << ore::NV("Cost", Cost) << " and with tree size "
7068                << ore::NV("TreeSize", V.getTreeSize());
7069       });
7070 
7071       // Vectorize a tree.
7072       DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
7073       Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);
7074 
7075       // Emit a reduction. If the root is a select (min/max idiom), the insert
7076       // point is the compare condition of that select.
7077       Instruction *RdxRootInst = cast<Instruction>(ReductionRoot);
7078       if (isa<SelectInst>(RdxRootInst))
7079         Builder.SetInsertPoint(getCmpForMinMaxReduction(RdxRootInst));
7080       else
7081         Builder.SetInsertPoint(RdxRootInst);
7082 
7083       Value *ReducedSubTree =
7084           emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);
7085 
7086       if (!VectorizedTree) {
7087         // Initialize the final value in the reduction.
7088         VectorizedTree = ReducedSubTree;
7089       } else {
7090         // Update the final value in the reduction.
7091         Builder.SetCurrentDebugLocation(Loc);
7092         VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
7093                                   ReducedSubTree, "op.rdx", ReductionOps);
7094       }
7095       i += ReduxWidth;
7096       ReduxWidth = PowerOf2Floor(NumReducedVals - i);
7097     }
7098 
7099     if (VectorizedTree) {
7100       // Finish the reduction.
7101       for (; i < NumReducedVals; ++i) {
7102         auto *I = cast<Instruction>(ReducedVals[i]);
7103         Builder.SetCurrentDebugLocation(I->getDebugLoc());
7104         VectorizedTree =
7105             createOp(Builder, RdxKind, VectorizedTree, I, "", ReductionOps);
7106       }
7107       for (auto &Pair : ExternallyUsedValues) {
7108         // Add each externally used value to the final reduction.
7109         for (auto *I : Pair.second) {
7110           Builder.SetCurrentDebugLocation(I->getDebugLoc());
7111           VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
7112                                     Pair.first, "op.extra", I);
7113         }
7114       }
7115 
7116       // Update users. For a min/max reduction that ends with a compare and
7117       // select, we also have to RAUW for the compare instruction feeding the
7118       // reduction root. That's because the original compare may have extra uses
7119       // besides the final select of the reduction.
7120       if (auto *ScalarSelect = dyn_cast<SelectInst>(ReductionRoot)) {
7121         if (auto *VecSelect = dyn_cast<SelectInst>(VectorizedTree)) {
7122           Instruction *ScalarCmp = getCmpForMinMaxReduction(ScalarSelect);
7123           ScalarCmp->replaceAllUsesWith(VecSelect->getCondition());
7124         }
7125       }
7126       ReductionRoot->replaceAllUsesWith(VectorizedTree);
7127 
7128       // Mark all scalar reduction ops for deletion, they are replaced by the
7129       // vector reductions.
7130       V.eraseInstructions(IgnoreList);
7131     }
7132     return VectorizedTree != nullptr;
7133   }
7134 
7135   unsigned numReductionValues() const { return ReducedVals.size(); }
7136 
7137 private:
7138   /// Calculate the cost of a reduction.
7139   InstructionCost getReductionCost(TargetTransformInfo *TTI,
7140                                    Value *FirstReducedVal,
7141                                    unsigned ReduxWidth) {
7142     Type *ScalarTy = FirstReducedVal->getType();
7143     FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth);
7144     InstructionCost VectorCost, ScalarCost;
7145     switch (RdxKind) {
7146     case RecurKind::Add:
7147     case RecurKind::Mul:
7148     case RecurKind::Or:
7149     case RecurKind::And:
7150     case RecurKind::Xor:
7151     case RecurKind::FAdd:
7152     case RecurKind::FMul: {
7153       unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind);
7154       VectorCost = TTI->getArithmeticReductionCost(RdxOpcode, VectorTy,
7155                                                    /*IsPairwiseForm=*/false);
7156       ScalarCost = TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy);
7157       break;
7158     }
7159     case RecurKind::FMax:
7160     case RecurKind::FMin: {
7161       auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy));
7162       VectorCost =
7163           TTI->getMinMaxReductionCost(VectorTy, VecCondTy,
7164                                       /*pairwise=*/false, /*unsigned=*/false);
7165       ScalarCost =
7166           TTI->getCmpSelInstrCost(Instruction::FCmp, ScalarTy) +
7167           TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
7168                                   CmpInst::makeCmpResultType(ScalarTy));
7169       break;
7170     }
7171     case RecurKind::SMax:
7172     case RecurKind::SMin:
7173     case RecurKind::UMax:
7174     case RecurKind::UMin: {
7175       auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy));
7176       bool IsUnsigned =
7177           RdxKind == RecurKind::UMax || RdxKind == RecurKind::UMin;
7178       VectorCost =
7179           TTI->getMinMaxReductionCost(VectorTy, VecCondTy,
7180                                       /*IsPairwiseForm=*/false, IsUnsigned);
7181       ScalarCost =
7182           TTI->getCmpSelInstrCost(Instruction::ICmp, ScalarTy) +
7183           TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
7184                                   CmpInst::makeCmpResultType(ScalarTy));
7185       break;
7186     }
7187     default:
7188       llvm_unreachable("Expected arithmetic or min/max reduction operation");
7189     }
7190 
7191     // Scalar cost is repeated for N-1 elements.
7192     ScalarCost *= (ReduxWidth - 1);
7193     LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost
7194                       << " for reduction that starts with " << *FirstReducedVal
7195                       << " (It is a splitting reduction)\n");
7196     return VectorCost - ScalarCost;
7197   }
7198 
7199   /// Emit a horizontal reduction of the vectorized value.
7200   Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder,
7201                        unsigned ReduxWidth, const TargetTransformInfo *TTI) {
7202     assert(VectorizedValue && "Need to have a vectorized tree node");
7203     assert(isPowerOf2_32(ReduxWidth) &&
7204            "We only handle power-of-two reductions for now");
7205 
7206     return createSimpleTargetReduction(Builder, TTI, VectorizedValue, RdxKind,
7207                                        ReductionOps.back());
7208   }
7209 };
7210 
7211 } // end anonymous namespace
7212 
7213 static Optional<unsigned> getAggregateSize(Instruction *InsertInst) {
7214   if (auto *IE = dyn_cast<InsertElementInst>(InsertInst))
7215     return cast<FixedVectorType>(IE->getType())->getNumElements();
7216 
7217   unsigned AggregateSize = 1;
7218   auto *IV = cast<InsertValueInst>(InsertInst);
7219   Type *CurrentType = IV->getType();
7220   do {
7221     if (auto *ST = dyn_cast<StructType>(CurrentType)) {
7222       for (auto *Elt : ST->elements())
7223         if (Elt != ST->getElementType(0)) // check homogeneity
7224           return None;
7225       AggregateSize *= ST->getNumElements();
7226       CurrentType = ST->getElementType(0);
7227     } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
7228       AggregateSize *= AT->getNumElements();
7229       CurrentType = AT->getElementType();
7230     } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) {
7231       AggregateSize *= VT->getNumElements();
7232       return AggregateSize;
7233     } else if (CurrentType->isSingleValueType()) {
7234       return AggregateSize;
7235     } else {
7236       return None;
7237     }
7238   } while (true);
7239 }
7240 
7241 static Optional<unsigned> getOperandIndex(Instruction *InsertInst,
7242                                           unsigned OperandOffset) {
7243   unsigned OperandIndex = OperandOffset;
7244   if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
7245     if (auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2))) {
7246       auto *VT = cast<FixedVectorType>(IE->getType());
7247       OperandIndex *= VT->getNumElements();
7248       OperandIndex += CI->getZExtValue();
7249       return OperandIndex;
7250     }
7251     return None;
7252   }
7253 
7254   auto *IV = cast<InsertValueInst>(InsertInst);
7255   Type *CurrentType = IV->getType();
7256   for (unsigned int Index : IV->indices()) {
7257     if (auto *ST = dyn_cast<StructType>(CurrentType)) {
7258       OperandIndex *= ST->getNumElements();
7259       CurrentType = ST->getElementType(Index);
7260     } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
7261       OperandIndex *= AT->getNumElements();
7262       CurrentType = AT->getElementType();
7263     } else {
7264       return None;
7265     }
7266     OperandIndex += Index;
7267   }
7268   return OperandIndex;
7269 }
7270 
7271 static bool findBuildAggregate_rec(Instruction *LastInsertInst,
7272                                    TargetTransformInfo *TTI,
7273                                    SmallVectorImpl<Value *> &BuildVectorOpds,
7274                                    SmallVectorImpl<Value *> &InsertElts,
7275                                    unsigned OperandOffset) {
7276   do {
7277     Value *InsertedOperand = LastInsertInst->getOperand(1);
7278     Optional<unsigned> OperandIndex =
7279         getOperandIndex(LastInsertInst, OperandOffset);
7280     if (!OperandIndex)
7281       return false;
7282     if (isa<InsertElementInst>(InsertedOperand) ||
7283         isa<InsertValueInst>(InsertedOperand)) {
7284       if (!findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI,
7285                                   BuildVectorOpds, InsertElts, *OperandIndex))
7286         return false;
7287     } else {
7288       BuildVectorOpds[*OperandIndex] = InsertedOperand;
7289       InsertElts[*OperandIndex] = LastInsertInst;
7290     }
7291     if (isa<UndefValue>(LastInsertInst->getOperand(0)))
7292       return true;
7293     LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0));
7294   } while (LastInsertInst != nullptr &&
7295            (isa<InsertValueInst>(LastInsertInst) ||
7296             isa<InsertElementInst>(LastInsertInst)) &&
7297            LastInsertInst->hasOneUse());
7298   return false;
7299 }
7300 
7301 /// Recognize construction of vectors like
7302 ///  %ra = insertelement <4 x float> poison, float %s0, i32 0
7303 ///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
7304 ///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
7305 ///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
7306 ///  starting from the last insertelement or insertvalue instruction.
7307 ///
7308 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>},
7309 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on.
7310 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples.
7311 ///
7312 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type.
7313 ///
7314 /// \return true if it matches.
7315 static bool findBuildAggregate(Instruction *LastInsertInst,
7316                                TargetTransformInfo *TTI,
7317                                SmallVectorImpl<Value *> &BuildVectorOpds,
7318                                SmallVectorImpl<Value *> &InsertElts) {
7319 
7320   assert((isa<InsertElementInst>(LastInsertInst) ||
7321           isa<InsertValueInst>(LastInsertInst)) &&
7322          "Expected insertelement or insertvalue instruction!");
7323 
7324   assert((BuildVectorOpds.empty() && InsertElts.empty()) &&
7325          "Expected empty result vectors!");
7326 
7327   Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst);
7328   if (!AggregateSize)
7329     return false;
7330   BuildVectorOpds.resize(*AggregateSize);
7331   InsertElts.resize(*AggregateSize);
7332 
7333   if (findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts,
7334                              0)) {
7335     llvm::erase_value(BuildVectorOpds, nullptr);
7336     llvm::erase_value(InsertElts, nullptr);
7337     if (BuildVectorOpds.size() >= 2)
7338       return true;
7339   }
7340 
7341   return false;
7342 }
7343 
7344 static bool PhiTypeSorterFunc(Value *V, Value *V2) {
7345   return V->getType() < V2->getType();
7346 }
7347 
7348 /// Try and get a reduction value from a phi node.
7349 ///
7350 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions
7351 /// if they come from either \p ParentBB or a containing loop latch.
7352 ///
7353 /// \returns A candidate reduction value if possible, or \code nullptr \endcode
7354 /// if not possible.
7355 static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
7356                                 BasicBlock *ParentBB, LoopInfo *LI) {
7357   // There are situations where the reduction value is not dominated by the
7358   // reduction phi. Vectorizing such cases has been reported to cause
7359   // miscompiles. See PR25787.
7360   auto DominatedReduxValue = [&](Value *R) {
7361     return isa<Instruction>(R) &&
7362            DT->dominates(P->getParent(), cast<Instruction>(R)->getParent());
7363   };
7364 
7365   Value *Rdx = nullptr;
7366 
7367   // Return the incoming value if it comes from the same BB as the phi node.
7368   if (P->getIncomingBlock(0) == ParentBB) {
7369     Rdx = P->getIncomingValue(0);
7370   } else if (P->getIncomingBlock(1) == ParentBB) {
7371     Rdx = P->getIncomingValue(1);
7372   }
7373 
7374   if (Rdx && DominatedReduxValue(Rdx))
7375     return Rdx;
7376 
7377   // Otherwise, check whether we have a loop latch to look at.
7378   Loop *BBL = LI->getLoopFor(ParentBB);
7379   if (!BBL)
7380     return nullptr;
7381   BasicBlock *BBLatch = BBL->getLoopLatch();
7382   if (!BBLatch)
7383     return nullptr;
7384 
7385   // There is a loop latch, return the incoming value if it comes from
7386   // that. This reduction pattern occasionally turns up.
7387   if (P->getIncomingBlock(0) == BBLatch) {
7388     Rdx = P->getIncomingValue(0);
7389   } else if (P->getIncomingBlock(1) == BBLatch) {
7390     Rdx = P->getIncomingValue(1);
7391   }
7392 
7393   if (Rdx && DominatedReduxValue(Rdx))
7394     return Rdx;
7395 
7396   return nullptr;
7397 }
7398 
7399 static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) {
7400   if (match(I, m_BinOp(m_Value(V0), m_Value(V1))))
7401     return true;
7402   if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1))))
7403     return true;
7404   if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1))))
7405     return true;
7406   if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1))))
7407     return true;
7408   if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1))))
7409     return true;
7410   if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1))))
7411     return true;
7412   if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1))))
7413     return true;
7414   return false;
7415 }
7416 
7417 /// Attempt to reduce a horizontal reduction.
7418 /// If it is legal to match a horizontal reduction feeding the phi node \a P
7419 /// with reduction operators \a Root (or one of its operands) in a basic block
7420 /// \a BB, then check if it can be done. If horizontal reduction is not found
7421 /// and root instruction is a binary operation, vectorization of the operands is
7422 /// attempted.
7423 /// \returns true if a horizontal reduction was matched and reduced or operands
7424 /// of one of the binary instruction were vectorized.
7425 /// \returns false if a horizontal reduction was not matched (or not possible)
7426 /// or no vectorization of any binary operation feeding \a Root instruction was
7427 /// performed.
7428 static bool tryToVectorizeHorReductionOrInstOperands(
7429     PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
7430     TargetTransformInfo *TTI,
7431     const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
7432   if (!ShouldVectorizeHor)
7433     return false;
7434 
7435   if (!Root)
7436     return false;
7437 
7438   if (Root->getParent() != BB || isa<PHINode>(Root))
7439     return false;
7440   // Start analysis starting from Root instruction. If horizontal reduction is
7441   // found, try to vectorize it. If it is not a horizontal reduction or
7442   // vectorization is not possible or not effective, and currently analyzed
7443   // instruction is a binary operation, try to vectorize the operands, using
7444   // pre-order DFS traversal order. If the operands were not vectorized, repeat
7445   // the same procedure considering each operand as a possible root of the
7446   // horizontal reduction.
7447   // Interrupt the process if the Root instruction itself was vectorized or all
7448   // sub-trees not higher that RecursionMaxDepth were analyzed/vectorized.
7449   SmallVector<std::pair<Instruction *, unsigned>, 8> Stack(1, {Root, 0});
7450   SmallPtrSet<Value *, 8> VisitedInstrs;
7451   bool Res = false;
7452   while (!Stack.empty()) {
7453     Instruction *Inst;
7454     unsigned Level;
7455     std::tie(Inst, Level) = Stack.pop_back_val();
7456     Value *B0, *B1;
7457     bool IsBinop = matchRdxBop(Inst, B0, B1);
7458     bool IsSelect = match(Inst, m_Select(m_Value(), m_Value(), m_Value()));
7459     if (IsBinop || IsSelect) {
7460       HorizontalReduction HorRdx;
7461       if (HorRdx.matchAssociativeReduction(P, Inst)) {
7462         if (HorRdx.tryToReduce(R, TTI)) {
7463           Res = true;
7464           // Set P to nullptr to avoid re-analysis of phi node in
7465           // matchAssociativeReduction function unless this is the root node.
7466           P = nullptr;
7467           continue;
7468         }
7469       }
7470       if (P && IsBinop) {
7471         Inst = dyn_cast<Instruction>(B0);
7472         if (Inst == P)
7473           Inst = dyn_cast<Instruction>(B1);
7474         if (!Inst) {
7475           // Set P to nullptr to avoid re-analysis of phi node in
7476           // matchAssociativeReduction function unless this is the root node.
7477           P = nullptr;
7478           continue;
7479         }
7480       }
7481     }
7482     // Set P to nullptr to avoid re-analysis of phi node in
7483     // matchAssociativeReduction function unless this is the root node.
7484     P = nullptr;
7485     if (Vectorize(Inst, R)) {
7486       Res = true;
7487       continue;
7488     }
7489 
7490     // Try to vectorize operands.
7491     // Continue analysis for the instruction from the same basic block only to
7492     // save compile time.
7493     if (++Level < RecursionMaxDepth)
7494       for (auto *Op : Inst->operand_values())
7495         if (VisitedInstrs.insert(Op).second)
7496           if (auto *I = dyn_cast<Instruction>(Op))
7497             if (!isa<PHINode>(I) && !R.isDeleted(I) && I->getParent() == BB)
7498               Stack.emplace_back(I, Level);
7499   }
7500   return Res;
7501 }
7502 
7503 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
7504                                                  BasicBlock *BB, BoUpSLP &R,
7505                                                  TargetTransformInfo *TTI) {
7506   auto *I = dyn_cast_or_null<Instruction>(V);
7507   if (!I)
7508     return false;
7509 
7510   if (!isa<BinaryOperator>(I))
7511     P = nullptr;
7512   // Try to match and vectorize a horizontal reduction.
7513   auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool {
7514     return tryToVectorize(I, R);
7515   };
7516   return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI,
7517                                                   ExtraVectorization);
7518 }
7519 
7520 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
7521                                                  BasicBlock *BB, BoUpSLP &R) {
7522   const DataLayout &DL = BB->getModule()->getDataLayout();
7523   if (!R.canMapToVector(IVI->getType(), DL))
7524     return false;
7525 
7526   SmallVector<Value *, 16> BuildVectorOpds;
7527   SmallVector<Value *, 16> BuildVectorInsts;
7528   if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts))
7529     return false;
7530 
7531   LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
7532   // Aggregate value is unlikely to be processed in vector register, we need to
7533   // extract scalars into scalar registers, so NeedExtraction is set true.
7534   return tryToVectorizeList(BuildVectorOpds, R, /*AllowReorder=*/false,
7535                             BuildVectorInsts);
7536 }
7537 
7538 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
7539                                                    BasicBlock *BB, BoUpSLP &R) {
7540   SmallVector<Value *, 16> BuildVectorInsts;
7541   SmallVector<Value *, 16> BuildVectorOpds;
7542   SmallVector<int> Mask;
7543   if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) ||
7544       (llvm::all_of(BuildVectorOpds,
7545                     [](Value *V) { return isa<ExtractElementInst>(V); }) &&
7546        isShuffle(BuildVectorOpds, Mask)))
7547     return false;
7548 
7549   // Vectorize starting with the build vector operands ignoring the BuildVector
7550   // instructions for the purpose of scheduling and user extraction.
7551   return tryToVectorizeList(BuildVectorOpds, R, /*AllowReorder=*/false,
7552                             BuildVectorInsts);
7553 }
7554 
7555 bool SLPVectorizerPass::vectorizeCmpInst(CmpInst *CI, BasicBlock *BB,
7556                                          BoUpSLP &R) {
7557   if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R))
7558     return true;
7559 
7560   bool OpsChanged = false;
7561   for (int Idx = 0; Idx < 2; ++Idx) {
7562     OpsChanged |=
7563         vectorizeRootInstruction(nullptr, CI->getOperand(Idx), BB, R, TTI);
7564   }
7565   return OpsChanged;
7566 }
7567 
7568 bool SLPVectorizerPass::vectorizeSimpleInstructions(
7569     SmallVectorImpl<Instruction *> &Instructions, BasicBlock *BB, BoUpSLP &R) {
7570   bool OpsChanged = false;
7571   for (auto *I : reverse(Instructions)) {
7572     if (R.isDeleted(I))
7573       continue;
7574     if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I))
7575       OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
7576     else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I))
7577       OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
7578     else if (auto *CI = dyn_cast<CmpInst>(I))
7579       OpsChanged |= vectorizeCmpInst(CI, BB, R);
7580   }
7581   Instructions.clear();
7582   return OpsChanged;
7583 }
7584 
7585 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
7586   bool Changed = false;
7587   SmallVector<Value *, 4> Incoming;
7588   SmallPtrSet<Value *, 16> VisitedInstrs;
7589 
7590   bool HaveVectorizedPhiNodes = true;
7591   while (HaveVectorizedPhiNodes) {
7592     HaveVectorizedPhiNodes = false;
7593 
7594     // Collect the incoming values from the PHIs.
7595     Incoming.clear();
7596     for (Instruction &I : *BB) {
7597       PHINode *P = dyn_cast<PHINode>(&I);
7598       if (!P)
7599         break;
7600 
7601       if (!VisitedInstrs.count(P) && !R.isDeleted(P))
7602         Incoming.push_back(P);
7603     }
7604 
7605     // Sort by type.
7606     llvm::stable_sort(Incoming, PhiTypeSorterFunc);
7607 
7608     // Try to vectorize elements base on their type.
7609     for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
7610                                            E = Incoming.end();
7611          IncIt != E;) {
7612 
7613       // Look for the next elements with the same type.
7614       SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
7615       while (SameTypeIt != E &&
7616              (*SameTypeIt)->getType() == (*IncIt)->getType()) {
7617         VisitedInstrs.insert(*SameTypeIt);
7618         ++SameTypeIt;
7619       }
7620 
7621       // Try to vectorize them.
7622       unsigned NumElts = (SameTypeIt - IncIt);
7623       LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs ("
7624                         << NumElts << ")\n");
7625       // The order in which the phi nodes appear in the program does not matter.
7626       // So allow tryToVectorizeList to reorder them if it is beneficial. This
7627       // is done when there are exactly two elements since tryToVectorizeList
7628       // asserts that there are only two values when AllowReorder is true.
7629       bool AllowReorder = NumElts == 2;
7630       if (NumElts > 1 &&
7631           tryToVectorizeList(makeArrayRef(IncIt, NumElts), R, AllowReorder)) {
7632         // Success start over because instructions might have been changed.
7633         HaveVectorizedPhiNodes = true;
7634         Changed = true;
7635         break;
7636       }
7637 
7638       // Start over at the next instruction of a different type (or the end).
7639       IncIt = SameTypeIt;
7640     }
7641   }
7642 
7643   VisitedInstrs.clear();
7644 
7645   SmallVector<Instruction *, 8> PostProcessInstructions;
7646   SmallDenseSet<Instruction *, 4> KeyNodes;
7647   for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
7648     // Skip instructions with scalable type. The num of elements is unknown at
7649     // compile-time for scalable type.
7650     if (isa<ScalableVectorType>(it->getType()))
7651       continue;
7652 
7653     // Skip instructions marked for the deletion.
7654     if (R.isDeleted(&*it))
7655       continue;
7656     // We may go through BB multiple times so skip the one we have checked.
7657     if (!VisitedInstrs.insert(&*it).second) {
7658       if (it->use_empty() && KeyNodes.contains(&*it) &&
7659           vectorizeSimpleInstructions(PostProcessInstructions, BB, R)) {
7660         // We would like to start over since some instructions are deleted
7661         // and the iterator may become invalid value.
7662         Changed = true;
7663         it = BB->begin();
7664         e = BB->end();
7665       }
7666       continue;
7667     }
7668 
7669     if (isa<DbgInfoIntrinsic>(it))
7670       continue;
7671 
7672     // Try to vectorize reductions that use PHINodes.
7673     if (PHINode *P = dyn_cast<PHINode>(it)) {
7674       // Check that the PHI is a reduction PHI.
7675       if (P->getNumIncomingValues() == 2) {
7676         // Try to match and vectorize a horizontal reduction.
7677         if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
7678                                      TTI)) {
7679           Changed = true;
7680           it = BB->begin();
7681           e = BB->end();
7682           continue;
7683         }
7684       }
7685       // Try to vectorize the incoming values of the PHI, to catch reductions
7686       // that feed into PHIs.
7687       for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) {
7688         // Skip if the incoming block is the current BB for now. Also, bypass
7689         // unreachable IR for efficiency and to avoid crashing.
7690         // TODO: Collect the skipped incoming values and try to vectorize them
7691         // after processing BB.
7692         if (BB == P->getIncomingBlock(I) ||
7693             !DT->isReachableFromEntry(P->getIncomingBlock(I)))
7694           continue;
7695 
7696         Changed |= vectorizeRootInstruction(nullptr, P->getIncomingValue(I),
7697                                             P->getIncomingBlock(I), R, TTI);
7698       }
7699       continue;
7700     }
7701 
7702     // Ran into an instruction without users, like terminator, or function call
7703     // with ignored return value, store. Ignore unused instructions (basing on
7704     // instruction type, except for CallInst and InvokeInst).
7705     if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
7706                             isa<InvokeInst>(it))) {
7707       KeyNodes.insert(&*it);
7708       bool OpsChanged = false;
7709       if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
7710         for (auto *V : it->operand_values()) {
7711           // Try to match and vectorize a horizontal reduction.
7712           OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
7713         }
7714       }
7715       // Start vectorization of post-process list of instructions from the
7716       // top-tree instructions to try to vectorize as many instructions as
7717       // possible.
7718       OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R);
7719       if (OpsChanged) {
7720         // We would like to start over since some instructions are deleted
7721         // and the iterator may become invalid value.
7722         Changed = true;
7723         it = BB->begin();
7724         e = BB->end();
7725         continue;
7726       }
7727     }
7728 
7729     if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
7730         isa<InsertValueInst>(it))
7731       PostProcessInstructions.push_back(&*it);
7732   }
7733 
7734   return Changed;
7735 }
7736 
7737 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
7738   auto Changed = false;
7739   for (auto &Entry : GEPs) {
7740     // If the getelementptr list has fewer than two elements, there's nothing
7741     // to do.
7742     if (Entry.second.size() < 2)
7743       continue;
7744 
7745     LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
7746                       << Entry.second.size() << ".\n");
7747 
7748     // Process the GEP list in chunks suitable for the target's supported
7749     // vector size. If a vector register can't hold 1 element, we are done. We
7750     // are trying to vectorize the index computations, so the maximum number of
7751     // elements is based on the size of the index expression, rather than the
7752     // size of the GEP itself (the target's pointer size).
7753     unsigned MaxVecRegSize = R.getMaxVecRegSize();
7754     unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin());
7755     if (MaxVecRegSize < EltSize)
7756       continue;
7757 
7758     unsigned MaxElts = MaxVecRegSize / EltSize;
7759     for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
7760       auto Len = std::min<unsigned>(BE - BI, MaxElts);
7761       ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);
7762 
7763       // Initialize a set a candidate getelementptrs. Note that we use a
7764       // SetVector here to preserve program order. If the index computations
7765       // are vectorizable and begin with loads, we want to minimize the chance
7766       // of having to reorder them later.
7767       SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());
7768 
7769       // Some of the candidates may have already been vectorized after we
7770       // initially collected them. If so, they are marked as deleted, so remove
7771       // them from the set of candidates.
7772       Candidates.remove_if(
7773           [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); });
7774 
7775       // Remove from the set of candidates all pairs of getelementptrs with
7776       // constant differences. Such getelementptrs are likely not good
7777       // candidates for vectorization in a bottom-up phase since one can be
7778       // computed from the other. We also ensure all candidate getelementptr
7779       // indices are unique.
7780       for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
7781         auto *GEPI = GEPList[I];
7782         if (!Candidates.count(GEPI))
7783           continue;
7784         auto *SCEVI = SE->getSCEV(GEPList[I]);
7785         for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
7786           auto *GEPJ = GEPList[J];
7787           auto *SCEVJ = SE->getSCEV(GEPList[J]);
7788           if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
7789             Candidates.remove(GEPI);
7790             Candidates.remove(GEPJ);
7791           } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
7792             Candidates.remove(GEPJ);
7793           }
7794         }
7795       }
7796 
7797       // We break out of the above computation as soon as we know there are
7798       // fewer than two candidates remaining.
7799       if (Candidates.size() < 2)
7800         continue;
7801 
7802       // Add the single, non-constant index of each candidate to the bundle. We
7803       // ensured the indices met these constraints when we originally collected
7804       // the getelementptrs.
7805       SmallVector<Value *, 16> Bundle(Candidates.size());
7806       auto BundleIndex = 0u;
7807       for (auto *V : Candidates) {
7808         auto *GEP = cast<GetElementPtrInst>(V);
7809         auto *GEPIdx = GEP->idx_begin()->get();
7810         assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
7811         Bundle[BundleIndex++] = GEPIdx;
7812       }
7813 
7814       // Try and vectorize the indices. We are currently only interested in
7815       // gather-like cases of the form:
7816       //
7817       // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
7818       //
7819       // where the loads of "a", the loads of "b", and the subtractions can be
7820       // performed in parallel. It's likely that detecting this pattern in a
7821       // bottom-up phase will be simpler and less costly than building a
7822       // full-blown top-down phase beginning at the consecutive loads.
7823       Changed |= tryToVectorizeList(Bundle, R);
7824     }
7825   }
7826   return Changed;
7827 }
7828 
7829 bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
7830   bool Changed = false;
7831   // Attempt to sort and vectorize each of the store-groups.
7832   for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e;
7833        ++it) {
7834     if (it->second.size() < 2)
7835       continue;
7836 
7837     LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
7838                       << it->second.size() << ".\n");
7839 
7840     Changed |= vectorizeStores(it->second, R);
7841   }
7842   return Changed;
7843 }
7844 
7845 char SLPVectorizer::ID = 0;
7846 
7847 static const char lv_name[] = "SLP Vectorizer";
7848 
7849 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
7850 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7851 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7852 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7853 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7854 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
7855 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7856 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7857 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7858 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)
7859 
7860 Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); }
7861