1 //===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass implements the Bottom Up SLP vectorizer. It detects consecutive
10 // stores that can be put together into vector-stores. Next, it attempts to
11 // construct vectorizable tree using the use-def chains. If a profitable tree
12 // was found, the SLP vectorizer performs vectorization on the tree.
13 //
14 // The pass is inspired by the work described in the paper:
15 //  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
16 //
17 //===----------------------------------------------------------------------===//
18 
19 #include "llvm/Transforms/Vectorize/SLPVectorizer.h"
20 #include "llvm/ADT/DenseMap.h"
21 #include "llvm/ADT/DenseSet.h"
22 #include "llvm/ADT/Optional.h"
23 #include "llvm/ADT/PostOrderIterator.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/ADT/SetVector.h"
26 #include "llvm/ADT/SmallBitVector.h"
27 #include "llvm/ADT/SmallPtrSet.h"
28 #include "llvm/ADT/SmallSet.h"
29 #include "llvm/ADT/SmallString.h"
30 #include "llvm/ADT/Statistic.h"
31 #include "llvm/ADT/iterator.h"
32 #include "llvm/ADT/iterator_range.h"
33 #include "llvm/Analysis/AliasAnalysis.h"
34 #include "llvm/Analysis/AssumptionCache.h"
35 #include "llvm/Analysis/CodeMetrics.h"
36 #include "llvm/Analysis/DemandedBits.h"
37 #include "llvm/Analysis/GlobalsModRef.h"
38 #include "llvm/Analysis/LoopAccessAnalysis.h"
39 #include "llvm/Analysis/LoopInfo.h"
40 #include "llvm/Analysis/MemoryLocation.h"
41 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
42 #include "llvm/Analysis/ScalarEvolution.h"
43 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
44 #include "llvm/Analysis/TargetLibraryInfo.h"
45 #include "llvm/Analysis/TargetTransformInfo.h"
46 #include "llvm/Analysis/ValueTracking.h"
47 #include "llvm/Analysis/VectorUtils.h"
48 #include "llvm/IR/Attributes.h"
49 #include "llvm/IR/BasicBlock.h"
50 #include "llvm/IR/Constant.h"
51 #include "llvm/IR/Constants.h"
52 #include "llvm/IR/DataLayout.h"
53 #include "llvm/IR/DebugLoc.h"
54 #include "llvm/IR/DerivedTypes.h"
55 #include "llvm/IR/Dominators.h"
56 #include "llvm/IR/Function.h"
57 #include "llvm/IR/IRBuilder.h"
58 #include "llvm/IR/InstrTypes.h"
59 #include "llvm/IR/Instruction.h"
60 #include "llvm/IR/Instructions.h"
61 #include "llvm/IR/IntrinsicInst.h"
62 #include "llvm/IR/Intrinsics.h"
63 #include "llvm/IR/Module.h"
64 #include "llvm/IR/NoFolder.h"
65 #include "llvm/IR/Operator.h"
66 #include "llvm/IR/PatternMatch.h"
67 #include "llvm/IR/Type.h"
68 #include "llvm/IR/Use.h"
69 #include "llvm/IR/User.h"
70 #include "llvm/IR/Value.h"
71 #include "llvm/IR/ValueHandle.h"
72 #include "llvm/IR/Verifier.h"
73 #include "llvm/InitializePasses.h"
74 #include "llvm/Pass.h"
75 #include "llvm/Support/Casting.h"
76 #include "llvm/Support/CommandLine.h"
77 #include "llvm/Support/Compiler.h"
78 #include "llvm/Support/DOTGraphTraits.h"
79 #include "llvm/Support/Debug.h"
80 #include "llvm/Support/ErrorHandling.h"
81 #include "llvm/Support/GraphWriter.h"
82 #include "llvm/Support/KnownBits.h"
83 #include "llvm/Support/MathExtras.h"
84 #include "llvm/Support/raw_ostream.h"
85 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
86 #include "llvm/Transforms/Utils/LoopUtils.h"
87 #include "llvm/Transforms/Vectorize.h"
88 #include <algorithm>
89 #include <cassert>
90 #include <cstdint>
91 #include <iterator>
92 #include <memory>
93 #include <set>
94 #include <string>
95 #include <tuple>
96 #include <utility>
97 #include <vector>
98 
99 using namespace llvm;
100 using namespace llvm::PatternMatch;
101 using namespace slpvectorizer;
102 
103 #define SV_NAME "slp-vectorizer"
104 #define DEBUG_TYPE "SLP"
105 
106 STATISTIC(NumVectorInstructions, "Number of vector instructions generated");
107 
108 cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
109                                   cl::desc("Run the SLP vectorization passes"));
110 
111 static cl::opt<int>
112     SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
113                      cl::desc("Only vectorize if you gain more than this "
114                               "number "));
115 
116 static cl::opt<bool>
117 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
118                    cl::desc("Attempt to vectorize horizontal reductions"));
119 
120 static cl::opt<bool> ShouldStartVectorizeHorAtStore(
121     "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
122     cl::desc(
123         "Attempt to vectorize horizontal reductions feeding into a store"));
124 
125 static cl::opt<int>
126 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
127     cl::desc("Attempt to vectorize for this register size in bits"));
128 
129 static cl::opt<unsigned>
130 MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden,
131     cl::desc("Maximum SLP vectorization factor (0=unlimited)"));
132 
133 static cl::opt<int>
134 MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden,
135     cl::desc("Maximum depth of the lookup for consecutive stores."));
136 
137 /// Limits the size of scheduling regions in a block.
138 /// It avoid long compile times for _very_ large blocks where vector
139 /// instructions are spread over a wide range.
140 /// This limit is way higher than needed by real-world functions.
141 static cl::opt<int>
142 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
143     cl::desc("Limit the size of the SLP scheduling region per block"));
144 
145 static cl::opt<int> MinVectorRegSizeOption(
146     "slp-min-reg-size", cl::init(128), cl::Hidden,
147     cl::desc("Attempt to vectorize for this register size in bits"));
148 
149 static cl::opt<unsigned> RecursionMaxDepth(
150     "slp-recursion-max-depth", cl::init(12), cl::Hidden,
151     cl::desc("Limit the recursion depth when building a vectorizable tree"));
152 
153 static cl::opt<unsigned> MinTreeSize(
154     "slp-min-tree-size", cl::init(3), cl::Hidden,
155     cl::desc("Only vectorize small trees if they are fully vectorizable"));
156 
157 // The maximum depth that the look-ahead score heuristic will explore.
158 // The higher this value, the higher the compilation time overhead.
159 static cl::opt<int> LookAheadMaxDepth(
160     "slp-max-look-ahead-depth", cl::init(2), cl::Hidden,
161     cl::desc("The maximum look-ahead depth for operand reordering scores"));
162 
163 // The Look-ahead heuristic goes through the users of the bundle to calculate
164 // the users cost in getExternalUsesCost(). To avoid compilation time increase
165 // we limit the number of users visited to this value.
166 static cl::opt<unsigned> LookAheadUsersBudget(
167     "slp-look-ahead-users-budget", cl::init(2), cl::Hidden,
168     cl::desc("The maximum number of users to visit while visiting the "
169              "predecessors. This prevents compilation time increase."));
170 
171 static cl::opt<bool>
172     ViewSLPTree("view-slp-tree", cl::Hidden,
173                 cl::desc("Display the SLP trees with Graphviz"));
174 
175 // Limit the number of alias checks. The limit is chosen so that
176 // it has no negative effect on the llvm benchmarks.
177 static const unsigned AliasedCheckLimit = 10;
178 
179 // Another limit for the alias checks: The maximum distance between load/store
180 // instructions where alias checks are done.
181 // This limit is useful for very large basic blocks.
182 static const unsigned MaxMemDepDistance = 160;
183 
184 /// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
185 /// regions to be handled.
186 static const int MinScheduleRegionSize = 16;
187 
188 /// Predicate for the element types that the SLP vectorizer supports.
189 ///
190 /// The most important thing to filter here are types which are invalid in LLVM
191 /// vectors. We also filter target specific types which have absolutely no
192 /// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
193 /// avoids spending time checking the cost model and realizing that they will
194 /// be inevitably scalarized.
195 static bool isValidElementType(Type *Ty) {
196   return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
197          !Ty->isPPC_FP128Ty();
198 }
199 
200 /// \returns true if all of the instructions in \p VL are in the same block or
201 /// false otherwise.
202 static bool allSameBlock(ArrayRef<Value *> VL) {
203   Instruction *I0 = dyn_cast<Instruction>(VL[0]);
204   if (!I0)
205     return false;
206   BasicBlock *BB = I0->getParent();
207   for (int I = 1, E = VL.size(); I < E; I++) {
208     auto *II = dyn_cast<Instruction>(VL[I]);
209     if (!II)
210       return false;
211 
212     if (BB != II->getParent())
213       return false;
214   }
215   return true;
216 }
217 
218 /// \returns True if all of the values in \p VL are constants (but not
219 /// globals/constant expressions).
220 static bool allConstant(ArrayRef<Value *> VL) {
221   // Constant expressions and globals can't be vectorized like normal integer/FP
222   // constants.
223   for (Value *i : VL)
224     if (!isa<Constant>(i) || isa<ConstantExpr>(i) || isa<GlobalValue>(i))
225       return false;
226   return true;
227 }
228 
229 /// \returns True if all of the values in \p VL are identical.
230 static bool isSplat(ArrayRef<Value *> VL) {
231   for (unsigned i = 1, e = VL.size(); i < e; ++i)
232     if (VL[i] != VL[0])
233       return false;
234   return true;
235 }
236 
237 /// \returns True if \p I is commutative, handles CmpInst and BinaryOperator.
238 static bool isCommutative(Instruction *I) {
239   if (auto *Cmp = dyn_cast<CmpInst>(I))
240     return Cmp->isCommutative();
241   if (auto *BO = dyn_cast<BinaryOperator>(I))
242     return BO->isCommutative();
243   // TODO: This should check for generic Instruction::isCommutative(), but
244   //       we need to confirm that the caller code correctly handles Intrinsics
245   //       for example (does not have 2 operands).
246   return false;
247 }
248 
249 /// Checks if the vector of instructions can be represented as a shuffle, like:
250 /// %x0 = extractelement <4 x i8> %x, i32 0
251 /// %x3 = extractelement <4 x i8> %x, i32 3
252 /// %y1 = extractelement <4 x i8> %y, i32 1
253 /// %y2 = extractelement <4 x i8> %y, i32 2
254 /// %x0x0 = mul i8 %x0, %x0
255 /// %x3x3 = mul i8 %x3, %x3
256 /// %y1y1 = mul i8 %y1, %y1
257 /// %y2y2 = mul i8 %y2, %y2
258 /// %ins1 = insertelement <4 x i8> undef, i8 %x0x0, i32 0
259 /// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
260 /// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
261 /// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
262 /// ret <4 x i8> %ins4
263 /// can be transformed into:
264 /// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
265 ///                                                         i32 6>
266 /// %2 = mul <4 x i8> %1, %1
267 /// ret <4 x i8> %2
268 /// We convert this initially to something like:
269 /// %x0 = extractelement <4 x i8> %x, i32 0
270 /// %x3 = extractelement <4 x i8> %x, i32 3
271 /// %y1 = extractelement <4 x i8> %y, i32 1
272 /// %y2 = extractelement <4 x i8> %y, i32 2
273 /// %1 = insertelement <4 x i8> undef, i8 %x0, i32 0
274 /// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
275 /// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
276 /// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
277 /// %5 = mul <4 x i8> %4, %4
278 /// %6 = extractelement <4 x i8> %5, i32 0
279 /// %ins1 = insertelement <4 x i8> undef, i8 %6, i32 0
280 /// %7 = extractelement <4 x i8> %5, i32 1
281 /// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
282 /// %8 = extractelement <4 x i8> %5, i32 2
283 /// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
284 /// %9 = extractelement <4 x i8> %5, i32 3
285 /// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
286 /// ret <4 x i8> %ins4
287 /// InstCombiner transforms this into a shuffle and vector mul
288 /// TODO: Can we split off and reuse the shuffle mask detection from
289 /// TargetTransformInfo::getInstructionThroughput?
290 static Optional<TargetTransformInfo::ShuffleKind>
291 isShuffle(ArrayRef<Value *> VL) {
292   auto *EI0 = cast<ExtractElementInst>(VL[0]);
293   unsigned Size =
294       cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
295   Value *Vec1 = nullptr;
296   Value *Vec2 = nullptr;
297   enum ShuffleMode { Unknown, Select, Permute };
298   ShuffleMode CommonShuffleMode = Unknown;
299   for (unsigned I = 0, E = VL.size(); I < E; ++I) {
300     auto *EI = cast<ExtractElementInst>(VL[I]);
301     auto *Vec = EI->getVectorOperand();
302     // All vector operands must have the same number of vector elements.
303     if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
304       return None;
305     auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
306     if (!Idx)
307       return None;
308     // Undefined behavior if Idx is negative or >= Size.
309     if (Idx->getValue().uge(Size))
310       continue;
311     unsigned IntIdx = Idx->getValue().getZExtValue();
312     // We can extractelement from undef vector.
313     if (isa<UndefValue>(Vec))
314       continue;
315     // For correct shuffling we have to have at most 2 different vector operands
316     // in all extractelement instructions.
317     if (!Vec1 || Vec1 == Vec)
318       Vec1 = Vec;
319     else if (!Vec2 || Vec2 == Vec)
320       Vec2 = Vec;
321     else
322       return None;
323     if (CommonShuffleMode == Permute)
324       continue;
325     // If the extract index is not the same as the operation number, it is a
326     // permutation.
327     if (IntIdx != I) {
328       CommonShuffleMode = Permute;
329       continue;
330     }
331     CommonShuffleMode = Select;
332   }
333   // If we're not crossing lanes in different vectors, consider it as blending.
334   if (CommonShuffleMode == Select && Vec2)
335     return TargetTransformInfo::SK_Select;
336   // If Vec2 was never used, we have a permutation of a single vector, otherwise
337   // we have permutation of 2 vectors.
338   return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
339               : TargetTransformInfo::SK_PermuteSingleSrc;
340 }
341 
342 namespace {
343 
344 /// Main data required for vectorization of instructions.
345 struct InstructionsState {
346   /// The very first instruction in the list with the main opcode.
347   Value *OpValue = nullptr;
348 
349   /// The main/alternate instruction.
350   Instruction *MainOp = nullptr;
351   Instruction *AltOp = nullptr;
352 
353   /// The main/alternate opcodes for the list of instructions.
354   unsigned getOpcode() const {
355     return MainOp ? MainOp->getOpcode() : 0;
356   }
357 
358   unsigned getAltOpcode() const {
359     return AltOp ? AltOp->getOpcode() : 0;
360   }
361 
362   /// Some of the instructions in the list have alternate opcodes.
363   bool isAltShuffle() const { return getOpcode() != getAltOpcode(); }
364 
365   bool isOpcodeOrAlt(Instruction *I) const {
366     unsigned CheckedOpcode = I->getOpcode();
367     return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
368   }
369 
370   InstructionsState() = delete;
371   InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
372       : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
373 };
374 
375 } // end anonymous namespace
376 
377 /// Chooses the correct key for scheduling data. If \p Op has the same (or
378 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
379 /// OpValue.
380 static Value *isOneOf(const InstructionsState &S, Value *Op) {
381   auto *I = dyn_cast<Instruction>(Op);
382   if (I && S.isOpcodeOrAlt(I))
383     return Op;
384   return S.OpValue;
385 }
386 
387 /// \returns true if \p Opcode is allowed as part of of the main/alternate
388 /// instruction for SLP vectorization.
389 ///
390 /// Example of unsupported opcode is SDIV that can potentially cause UB if the
391 /// "shuffled out" lane would result in division by zero.
392 static bool isValidForAlternation(unsigned Opcode) {
393   if (Instruction::isIntDivRem(Opcode))
394     return false;
395 
396   return true;
397 }
398 
399 /// \returns analysis of the Instructions in \p VL described in
400 /// InstructionsState, the Opcode that we suppose the whole list
401 /// could be vectorized even if its structure is diverse.
402 static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
403                                        unsigned BaseIndex = 0) {
404   // Make sure these are all Instructions.
405   if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
406     return InstructionsState(VL[BaseIndex], nullptr, nullptr);
407 
408   bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
409   bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
410   unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
411   unsigned AltOpcode = Opcode;
412   unsigned AltIndex = BaseIndex;
413 
414   // Check for one alternate opcode from another BinaryOperator.
415   // TODO - generalize to support all operators (types, calls etc.).
416   for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
417     unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
418     if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
419       if (InstOpcode == Opcode || InstOpcode == AltOpcode)
420         continue;
421       if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
422           isValidForAlternation(Opcode)) {
423         AltOpcode = InstOpcode;
424         AltIndex = Cnt;
425         continue;
426       }
427     } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
428       Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
429       Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
430       if (Ty0 == Ty1) {
431         if (InstOpcode == Opcode || InstOpcode == AltOpcode)
432           continue;
433         if (Opcode == AltOpcode) {
434           assert(isValidForAlternation(Opcode) &&
435                  isValidForAlternation(InstOpcode) &&
436                  "Cast isn't safe for alternation, logic needs to be updated!");
437           AltOpcode = InstOpcode;
438           AltIndex = Cnt;
439           continue;
440         }
441       }
442     } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
443       continue;
444     return InstructionsState(VL[BaseIndex], nullptr, nullptr);
445   }
446 
447   return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
448                            cast<Instruction>(VL[AltIndex]));
449 }
450 
451 /// \returns true if all of the values in \p VL have the same type or false
452 /// otherwise.
453 static bool allSameType(ArrayRef<Value *> VL) {
454   Type *Ty = VL[0]->getType();
455   for (int i = 1, e = VL.size(); i < e; i++)
456     if (VL[i]->getType() != Ty)
457       return false;
458 
459   return true;
460 }
461 
462 /// \returns True if Extract{Value,Element} instruction extracts element Idx.
463 static Optional<unsigned> getExtractIndex(Instruction *E) {
464   unsigned Opcode = E->getOpcode();
465   assert((Opcode == Instruction::ExtractElement ||
466           Opcode == Instruction::ExtractValue) &&
467          "Expected extractelement or extractvalue instruction.");
468   if (Opcode == Instruction::ExtractElement) {
469     auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
470     if (!CI)
471       return None;
472     return CI->getZExtValue();
473   }
474   ExtractValueInst *EI = cast<ExtractValueInst>(E);
475   if (EI->getNumIndices() != 1)
476     return None;
477   return *EI->idx_begin();
478 }
479 
480 /// \returns True if in-tree use also needs extract. This refers to
481 /// possible scalar operand in vectorized instruction.
482 static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
483                                     TargetLibraryInfo *TLI) {
484   unsigned Opcode = UserInst->getOpcode();
485   switch (Opcode) {
486   case Instruction::Load: {
487     LoadInst *LI = cast<LoadInst>(UserInst);
488     return (LI->getPointerOperand() == Scalar);
489   }
490   case Instruction::Store: {
491     StoreInst *SI = cast<StoreInst>(UserInst);
492     return (SI->getPointerOperand() == Scalar);
493   }
494   case Instruction::Call: {
495     CallInst *CI = cast<CallInst>(UserInst);
496     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
497     for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
498       if (hasVectorInstrinsicScalarOpd(ID, i))
499         return (CI->getArgOperand(i) == Scalar);
500     }
501     LLVM_FALLTHROUGH;
502   }
503   default:
504     return false;
505   }
506 }
507 
508 /// \returns the AA location that is being access by the instruction.
509 static MemoryLocation getLocation(Instruction *I, AAResults *AA) {
510   if (StoreInst *SI = dyn_cast<StoreInst>(I))
511     return MemoryLocation::get(SI);
512   if (LoadInst *LI = dyn_cast<LoadInst>(I))
513     return MemoryLocation::get(LI);
514   return MemoryLocation();
515 }
516 
517 /// \returns True if the instruction is not a volatile or atomic load/store.
518 static bool isSimple(Instruction *I) {
519   if (LoadInst *LI = dyn_cast<LoadInst>(I))
520     return LI->isSimple();
521   if (StoreInst *SI = dyn_cast<StoreInst>(I))
522     return SI->isSimple();
523   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
524     return !MI->isVolatile();
525   return true;
526 }
527 
528 namespace llvm {
529 
530 static void inversePermutation(ArrayRef<unsigned> Indices,
531                                SmallVectorImpl<int> &Mask) {
532   Mask.clear();
533   const unsigned E = Indices.size();
534   Mask.resize(E, E + 1);
535   for (unsigned I = 0; I < E; ++I)
536     Mask[Indices[I]] = I;
537 }
538 
539 namespace slpvectorizer {
540 
541 /// Bottom Up SLP Vectorizer.
542 class BoUpSLP {
543   struct TreeEntry;
544   struct ScheduleData;
545 
546 public:
547   using ValueList = SmallVector<Value *, 8>;
548   using InstrList = SmallVector<Instruction *, 16>;
549   using ValueSet = SmallPtrSet<Value *, 16>;
550   using StoreList = SmallVector<StoreInst *, 8>;
551   using ExtraValueToDebugLocsMap =
552       MapVector<Value *, SmallVector<Instruction *, 2>>;
553   using OrdersType = SmallVector<unsigned, 4>;
554 
555   BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
556           TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
557           DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
558           const DataLayout *DL, OptimizationRemarkEmitter *ORE)
559       : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC),
560         DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
561     CodeMetrics::collectEphemeralValues(F, AC, EphValues);
562     // Use the vector register size specified by the target unless overridden
563     // by a command-line option.
564     // TODO: It would be better to limit the vectorization factor based on
565     //       data type rather than just register size. For example, x86 AVX has
566     //       256-bit registers, but it does not support integer operations
567     //       at that width (that requires AVX2).
568     if (MaxVectorRegSizeOption.getNumOccurrences())
569       MaxVecRegSize = MaxVectorRegSizeOption;
570     else
571       MaxVecRegSize = TTI->getRegisterBitWidth(true);
572 
573     if (MinVectorRegSizeOption.getNumOccurrences())
574       MinVecRegSize = MinVectorRegSizeOption;
575     else
576       MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
577   }
578 
579   /// Vectorize the tree that starts with the elements in \p VL.
580   /// Returns the vectorized root.
581   Value *vectorizeTree();
582 
583   /// Vectorize the tree but with the list of externally used values \p
584   /// ExternallyUsedValues. Values in this MapVector can be replaced but the
585   /// generated extractvalue instructions.
586   Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);
587 
588   /// \returns the cost incurred by unwanted spills and fills, caused by
589   /// holding live values over call sites.
590   int getSpillCost() const;
591 
592   /// \returns the vectorization cost of the subtree that starts at \p VL.
593   /// A negative number means that this is profitable.
594   int getTreeCost();
595 
596   /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
597   /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
598   void buildTree(ArrayRef<Value *> Roots,
599                  ArrayRef<Value *> UserIgnoreLst = None);
600 
601   /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
602   /// the purpose of scheduling and extraction in the \p UserIgnoreLst taking
603   /// into account (and updating it, if required) list of externally used
604   /// values stored in \p ExternallyUsedValues.
605   void buildTree(ArrayRef<Value *> Roots,
606                  ExtraValueToDebugLocsMap &ExternallyUsedValues,
607                  ArrayRef<Value *> UserIgnoreLst = None);
608 
609   /// Clear the internal data structures that are created by 'buildTree'.
610   void deleteTree() {
611     VectorizableTree.clear();
612     ScalarToTreeEntry.clear();
613     MustGather.clear();
614     ExternalUses.clear();
615     NumOpsWantToKeepOrder.clear();
616     NumOpsWantToKeepOriginalOrder = 0;
617     for (auto &Iter : BlocksSchedules) {
618       BlockScheduling *BS = Iter.second.get();
619       BS->clear();
620     }
621     MinBWs.clear();
622   }
623 
624   unsigned getTreeSize() const { return VectorizableTree.size(); }
625 
626   /// Perform LICM and CSE on the newly generated gather sequences.
627   void optimizeGatherSequence();
628 
629   /// \returns The best order of instructions for vectorization.
630   Optional<ArrayRef<unsigned>> bestOrder() const {
631     assert(llvm::all_of(
632                NumOpsWantToKeepOrder,
633                [this](const decltype(NumOpsWantToKeepOrder)::value_type &D) {
634                  return D.getFirst().size() ==
635                         VectorizableTree[0]->Scalars.size();
636                }) &&
637            "All orders must have the same size as number of instructions in "
638            "tree node.");
639     auto I = std::max_element(
640         NumOpsWantToKeepOrder.begin(), NumOpsWantToKeepOrder.end(),
641         [](const decltype(NumOpsWantToKeepOrder)::value_type &D1,
642            const decltype(NumOpsWantToKeepOrder)::value_type &D2) {
643           return D1.second < D2.second;
644         });
645     if (I == NumOpsWantToKeepOrder.end() ||
646         I->getSecond() <= NumOpsWantToKeepOriginalOrder)
647       return None;
648 
649     return makeArrayRef(I->getFirst());
650   }
651 
652   /// Builds the correct order for root instructions.
653   /// If some leaves have the same instructions to be vectorized, we may
654   /// incorrectly evaluate the best order for the root node (it is built for the
655   /// vector of instructions without repeated instructions and, thus, has less
656   /// elements than the root node). This function builds the correct order for
657   /// the root node.
658   /// For example, if the root node is \<a+b, a+c, a+d, f+e\>, then the leaves
659   /// are \<a, a, a, f\> and \<b, c, d, e\>. When we try to vectorize the first
660   /// leaf, it will be shrink to \<a, b\>. If instructions in this leaf should
661   /// be reordered, the best order will be \<1, 0\>. We need to extend this
662   /// order for the root node. For the root node this order should look like
663   /// \<3, 0, 1, 2\>. This function extends the order for the reused
664   /// instructions.
665   void findRootOrder(OrdersType &Order) {
666     // If the leaf has the same number of instructions to vectorize as the root
667     // - order must be set already.
668     unsigned RootSize = VectorizableTree[0]->Scalars.size();
669     if (Order.size() == RootSize)
670       return;
671     SmallVector<unsigned, 4> RealOrder(Order.size());
672     std::swap(Order, RealOrder);
673     SmallVector<int, 4> Mask;
674     inversePermutation(RealOrder, Mask);
675     Order.assign(Mask.begin(), Mask.end());
676     // The leaf has less number of instructions - need to find the true order of
677     // the root.
678     // Scan the nodes starting from the leaf back to the root.
679     const TreeEntry *PNode = VectorizableTree.back().get();
680     SmallVector<const TreeEntry *, 4> Nodes(1, PNode);
681     SmallPtrSet<const TreeEntry *, 4> Visited;
682     while (!Nodes.empty() && Order.size() != RootSize) {
683       const TreeEntry *PNode = Nodes.pop_back_val();
684       if (!Visited.insert(PNode).second)
685         continue;
686       const TreeEntry &Node = *PNode;
687       for (const EdgeInfo &EI : Node.UserTreeIndices)
688         if (EI.UserTE)
689           Nodes.push_back(EI.UserTE);
690       if (Node.ReuseShuffleIndices.empty())
691         continue;
692       // Build the order for the parent node.
693       OrdersType NewOrder(Node.ReuseShuffleIndices.size(), RootSize);
694       SmallVector<unsigned, 4> OrderCounter(Order.size(), 0);
695       // The algorithm of the order extension is:
696       // 1. Calculate the number of the same instructions for the order.
697       // 2. Calculate the index of the new order: total number of instructions
698       // with order less than the order of the current instruction + reuse
699       // number of the current instruction.
700       // 3. The new order is just the index of the instruction in the original
701       // vector of the instructions.
702       for (unsigned I : Node.ReuseShuffleIndices)
703         ++OrderCounter[Order[I]];
704       SmallVector<unsigned, 4> CurrentCounter(Order.size(), 0);
705       for (unsigned I = 0, E = Node.ReuseShuffleIndices.size(); I < E; ++I) {
706         unsigned ReusedIdx = Node.ReuseShuffleIndices[I];
707         unsigned OrderIdx = Order[ReusedIdx];
708         unsigned NewIdx = 0;
709         for (unsigned J = 0; J < OrderIdx; ++J)
710           NewIdx += OrderCounter[J];
711         NewIdx += CurrentCounter[OrderIdx];
712         ++CurrentCounter[OrderIdx];
713         assert(NewOrder[NewIdx] == RootSize &&
714                "The order index should not be written already.");
715         NewOrder[NewIdx] = I;
716       }
717       std::swap(Order, NewOrder);
718     }
719     assert(Order.size() == RootSize &&
720            "Root node is expected or the size of the order must be the same as "
721            "the number of elements in the root node.");
722     assert(llvm::all_of(Order,
723                         [RootSize](unsigned Val) { return Val != RootSize; }) &&
724            "All indices must be initialized");
725   }
726 
727   /// \return The vector element size in bits to use when vectorizing the
728   /// expression tree ending at \p V. If V is a store, the size is the width of
729   /// the stored value. Otherwise, the size is the width of the largest loaded
730   /// value reaching V. This method is used by the vectorizer to calculate
731   /// vectorization factors.
732   unsigned getVectorElementSize(Value *V);
733 
734   /// Compute the minimum type sizes required to represent the entries in a
735   /// vectorizable tree.
736   void computeMinimumValueSizes();
737 
738   // \returns maximum vector register size as set by TTI or overridden by cl::opt.
739   unsigned getMaxVecRegSize() const {
740     return MaxVecRegSize;
741   }
742 
743   // \returns minimum vector register size as set by cl::opt.
744   unsigned getMinVecRegSize() const {
745     return MinVecRegSize;
746   }
747 
748   unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
749     unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
750       MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
751     return MaxVF ? MaxVF : UINT_MAX;
752   }
753 
754   /// Check if homogeneous aggregate is isomorphic to some VectorType.
755   /// Accepts homogeneous multidimensional aggregate of scalars/vectors like
756   /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
757   /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
758   ///
759   /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
760   unsigned canMapToVector(Type *T, const DataLayout &DL) const;
761 
762   /// \returns True if the VectorizableTree is both tiny and not fully
763   /// vectorizable. We do not vectorize such trees.
764   bool isTreeTinyAndNotFullyVectorizable() const;
765 
766   /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
767   /// can be load combined in the backend. Load combining may not be allowed in
768   /// the IR optimizer, so we do not want to alter the pattern. For example,
769   /// partially transforming a scalar bswap() pattern into vector code is
770   /// effectively impossible for the backend to undo.
771   /// TODO: If load combining is allowed in the IR optimizer, this analysis
772   ///       may not be necessary.
773   bool isLoadCombineReductionCandidate(unsigned ReductionOpcode) const;
774 
775   /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
776   /// can be load combined in the backend. Load combining may not be allowed in
777   /// the IR optimizer, so we do not want to alter the pattern. For example,
778   /// partially transforming a scalar bswap() pattern into vector code is
779   /// effectively impossible for the backend to undo.
780   /// TODO: If load combining is allowed in the IR optimizer, this analysis
781   ///       may not be necessary.
782   bool isLoadCombineCandidate() const;
783 
784   OptimizationRemarkEmitter *getORE() { return ORE; }
785 
786   /// This structure holds any data we need about the edges being traversed
787   /// during buildTree_rec(). We keep track of:
788   /// (i) the user TreeEntry index, and
789   /// (ii) the index of the edge.
790   struct EdgeInfo {
791     EdgeInfo() = default;
792     EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
793         : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
794     /// The user TreeEntry.
795     TreeEntry *UserTE = nullptr;
796     /// The operand index of the use.
797     unsigned EdgeIdx = UINT_MAX;
798 #ifndef NDEBUG
799     friend inline raw_ostream &operator<<(raw_ostream &OS,
800                                           const BoUpSLP::EdgeInfo &EI) {
801       EI.dump(OS);
802       return OS;
803     }
804     /// Debug print.
805     void dump(raw_ostream &OS) const {
806       OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
807          << " EdgeIdx:" << EdgeIdx << "}";
808     }
809     LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
810 #endif
811   };
812 
813   /// A helper data structure to hold the operands of a vector of instructions.
814   /// This supports a fixed vector length for all operand vectors.
815   class VLOperands {
816     /// For each operand we need (i) the value, and (ii) the opcode that it
817     /// would be attached to if the expression was in a left-linearized form.
818     /// This is required to avoid illegal operand reordering.
819     /// For example:
820     /// \verbatim
821     ///                         0 Op1
822     ///                         |/
823     /// Op1 Op2   Linearized    + Op2
824     ///   \ /     ---------->   |/
825     ///    -                    -
826     ///
827     /// Op1 - Op2            (0 + Op1) - Op2
828     /// \endverbatim
829     ///
830     /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
831     ///
832     /// Another way to think of this is to track all the operations across the
833     /// path from the operand all the way to the root of the tree and to
834     /// calculate the operation that corresponds to this path. For example, the
835     /// path from Op2 to the root crosses the RHS of the '-', therefore the
836     /// corresponding operation is a '-' (which matches the one in the
837     /// linearized tree, as shown above).
838     ///
839     /// For lack of a better term, we refer to this operation as Accumulated
840     /// Path Operation (APO).
841     struct OperandData {
842       OperandData() = default;
843       OperandData(Value *V, bool APO, bool IsUsed)
844           : V(V), APO(APO), IsUsed(IsUsed) {}
845       /// The operand value.
846       Value *V = nullptr;
847       /// TreeEntries only allow a single opcode, or an alternate sequence of
848       /// them (e.g, +, -). Therefore, we can safely use a boolean value for the
849       /// APO. It is set to 'true' if 'V' is attached to an inverse operation
850       /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise
851       /// (e.g., Add/Mul)
852       bool APO = false;
853       /// Helper data for the reordering function.
854       bool IsUsed = false;
855     };
856 
857     /// During operand reordering, we are trying to select the operand at lane
858     /// that matches best with the operand at the neighboring lane. Our
859     /// selection is based on the type of value we are looking for. For example,
860     /// if the neighboring lane has a load, we need to look for a load that is
861     /// accessing a consecutive address. These strategies are summarized in the
862     /// 'ReorderingMode' enumerator.
863     enum class ReorderingMode {
864       Load,     ///< Matching loads to consecutive memory addresses
865       Opcode,   ///< Matching instructions based on opcode (same or alternate)
866       Constant, ///< Matching constants
867       Splat,    ///< Matching the same instruction multiple times (broadcast)
868       Failed,   ///< We failed to create a vectorizable group
869     };
870 
871     using OperandDataVec = SmallVector<OperandData, 2>;
872 
873     /// A vector of operand vectors.
874     SmallVector<OperandDataVec, 4> OpsVec;
875 
876     const DataLayout &DL;
877     ScalarEvolution &SE;
878     const BoUpSLP &R;
879 
880     /// \returns the operand data at \p OpIdx and \p Lane.
881     OperandData &getData(unsigned OpIdx, unsigned Lane) {
882       return OpsVec[OpIdx][Lane];
883     }
884 
885     /// \returns the operand data at \p OpIdx and \p Lane. Const version.
886     const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
887       return OpsVec[OpIdx][Lane];
888     }
889 
890     /// Clears the used flag for all entries.
891     void clearUsed() {
892       for (unsigned OpIdx = 0, NumOperands = getNumOperands();
893            OpIdx != NumOperands; ++OpIdx)
894         for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
895              ++Lane)
896           OpsVec[OpIdx][Lane].IsUsed = false;
897     }
898 
899     /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
900     void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
901       std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
902     }
903 
904     // The hard-coded scores listed here are not very important. When computing
905     // the scores of matching one sub-tree with another, we are basically
906     // counting the number of values that are matching. So even if all scores
907     // are set to 1, we would still get a decent matching result.
908     // However, sometimes we have to break ties. For example we may have to
909     // choose between matching loads vs matching opcodes. This is what these
910     // scores are helping us with: they provide the order of preference.
911 
912     /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
913     static const int ScoreConsecutiveLoads = 3;
914     /// ExtractElementInst from same vector and consecutive indexes.
915     static const int ScoreConsecutiveExtracts = 3;
916     /// Constants.
917     static const int ScoreConstants = 2;
918     /// Instructions with the same opcode.
919     static const int ScoreSameOpcode = 2;
920     /// Instructions with alt opcodes (e.g, add + sub).
921     static const int ScoreAltOpcodes = 1;
922     /// Identical instructions (a.k.a. splat or broadcast).
923     static const int ScoreSplat = 1;
924     /// Matching with an undef is preferable to failing.
925     static const int ScoreUndef = 1;
926     /// Score for failing to find a decent match.
927     static const int ScoreFail = 0;
928     /// User exteranl to the vectorized code.
929     static const int ExternalUseCost = 1;
930     /// The user is internal but in a different lane.
931     static const int UserInDiffLaneCost = ExternalUseCost;
932 
933     /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
934     static int getShallowScore(Value *V1, Value *V2, const DataLayout &DL,
935                                ScalarEvolution &SE) {
936       auto *LI1 = dyn_cast<LoadInst>(V1);
937       auto *LI2 = dyn_cast<LoadInst>(V2);
938       if (LI1 && LI2)
939         return isConsecutiveAccess(LI1, LI2, DL, SE)
940                    ? VLOperands::ScoreConsecutiveLoads
941                    : VLOperands::ScoreFail;
942 
943       auto *C1 = dyn_cast<Constant>(V1);
944       auto *C2 = dyn_cast<Constant>(V2);
945       if (C1 && C2)
946         return VLOperands::ScoreConstants;
947 
948       // Extracts from consecutive indexes of the same vector better score as
949       // the extracts could be optimized away.
950       Value *EV;
951       ConstantInt *Ex1Idx, *Ex2Idx;
952       if (match(V1, m_ExtractElt(m_Value(EV), m_ConstantInt(Ex1Idx))) &&
953           match(V2, m_ExtractElt(m_Deferred(EV), m_ConstantInt(Ex2Idx))) &&
954           Ex1Idx->getZExtValue() + 1 == Ex2Idx->getZExtValue())
955         return VLOperands::ScoreConsecutiveExtracts;
956 
957       auto *I1 = dyn_cast<Instruction>(V1);
958       auto *I2 = dyn_cast<Instruction>(V2);
959       if (I1 && I2) {
960         if (I1 == I2)
961           return VLOperands::ScoreSplat;
962         InstructionsState S = getSameOpcode({I1, I2});
963         // Note: Only consider instructions with <= 2 operands to avoid
964         // complexity explosion.
965         if (S.getOpcode() && S.MainOp->getNumOperands() <= 2)
966           return S.isAltShuffle() ? VLOperands::ScoreAltOpcodes
967                                   : VLOperands::ScoreSameOpcode;
968       }
969 
970       if (isa<UndefValue>(V2))
971         return VLOperands::ScoreUndef;
972 
973       return VLOperands::ScoreFail;
974     }
975 
976     /// Holds the values and their lane that are taking part in the look-ahead
977     /// score calculation. This is used in the external uses cost calculation.
978     SmallDenseMap<Value *, int> InLookAheadValues;
979 
980     /// \Returns the additinal cost due to uses of \p LHS and \p RHS that are
981     /// either external to the vectorized code, or require shuffling.
982     int getExternalUsesCost(const std::pair<Value *, int> &LHS,
983                             const std::pair<Value *, int> &RHS) {
984       int Cost = 0;
985       std::array<std::pair<Value *, int>, 2> Values = {{LHS, RHS}};
986       for (int Idx = 0, IdxE = Values.size(); Idx != IdxE; ++Idx) {
987         Value *V = Values[Idx].first;
988         // Calculate the absolute lane, using the minimum relative lane of LHS
989         // and RHS as base and Idx as the offset.
990         int Ln = std::min(LHS.second, RHS.second) + Idx;
991         assert(Ln >= 0 && "Bad lane calculation");
992         unsigned UsersBudget = LookAheadUsersBudget;
993         for (User *U : V->users()) {
994           if (const TreeEntry *UserTE = R.getTreeEntry(U)) {
995             // The user is in the VectorizableTree. Check if we need to insert.
996             auto It = llvm::find(UserTE->Scalars, U);
997             assert(It != UserTE->Scalars.end() && "U is in UserTE");
998             int UserLn = std::distance(UserTE->Scalars.begin(), It);
999             assert(UserLn >= 0 && "Bad lane");
1000             if (UserLn != Ln)
1001               Cost += UserInDiffLaneCost;
1002           } else {
1003             // Check if the user is in the look-ahead code.
1004             auto It2 = InLookAheadValues.find(U);
1005             if (It2 != InLookAheadValues.end()) {
1006               // The user is in the look-ahead code. Check the lane.
1007               if (It2->second != Ln)
1008                 Cost += UserInDiffLaneCost;
1009             } else {
1010               // The user is neither in SLP tree nor in the look-ahead code.
1011               Cost += ExternalUseCost;
1012             }
1013           }
1014           // Limit the number of visited uses to cap compilation time.
1015           if (--UsersBudget == 0)
1016             break;
1017         }
1018       }
1019       return Cost;
1020     }
1021 
1022     /// Go through the operands of \p LHS and \p RHS recursively until \p
1023     /// MaxLevel, and return the cummulative score. For example:
1024     /// \verbatim
1025     ///  A[0]  B[0]  A[1]  B[1]  C[0] D[0]  B[1] A[1]
1026     ///     \ /         \ /         \ /        \ /
1027     ///      +           +           +          +
1028     ///     G1          G2          G3         G4
1029     /// \endverbatim
1030     /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
1031     /// each level recursively, accumulating the score. It starts from matching
1032     /// the additions at level 0, then moves on to the loads (level 1). The
1033     /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
1034     /// {B[0],B[1]} match with VLOperands::ScoreConsecutiveLoads, while
1035     /// {A[0],C[0]} has a score of VLOperands::ScoreFail.
1036     /// Please note that the order of the operands does not matter, as we
1037     /// evaluate the score of all profitable combinations of operands. In
1038     /// other words the score of G1 and G4 is the same as G1 and G2. This
1039     /// heuristic is based on ideas described in:
1040     ///   Look-ahead SLP: Auto-vectorization in the presence of commutative
1041     ///   operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
1042     ///   Luís F. W. Góes
1043     int getScoreAtLevelRec(const std::pair<Value *, int> &LHS,
1044                            const std::pair<Value *, int> &RHS, int CurrLevel,
1045                            int MaxLevel) {
1046 
1047       Value *V1 = LHS.first;
1048       Value *V2 = RHS.first;
1049       // Get the shallow score of V1 and V2.
1050       int ShallowScoreAtThisLevel =
1051           std::max((int)ScoreFail, getShallowScore(V1, V2, DL, SE) -
1052                                        getExternalUsesCost(LHS, RHS));
1053       int Lane1 = LHS.second;
1054       int Lane2 = RHS.second;
1055 
1056       // If reached MaxLevel,
1057       //  or if V1 and V2 are not instructions,
1058       //  or if they are SPLAT,
1059       //  or if they are not consecutive, early return the current cost.
1060       auto *I1 = dyn_cast<Instruction>(V1);
1061       auto *I2 = dyn_cast<Instruction>(V2);
1062       if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
1063           ShallowScoreAtThisLevel == VLOperands::ScoreFail ||
1064           (isa<LoadInst>(I1) && isa<LoadInst>(I2) && ShallowScoreAtThisLevel))
1065         return ShallowScoreAtThisLevel;
1066       assert(I1 && I2 && "Should have early exited.");
1067 
1068       // Keep track of in-tree values for determining the external-use cost.
1069       InLookAheadValues[V1] = Lane1;
1070       InLookAheadValues[V2] = Lane2;
1071 
1072       // Contains the I2 operand indexes that got matched with I1 operands.
1073       SmallSet<unsigned, 4> Op2Used;
1074 
1075       // Recursion towards the operands of I1 and I2. We are trying all possbile
1076       // operand pairs, and keeping track of the best score.
1077       for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
1078            OpIdx1 != NumOperands1; ++OpIdx1) {
1079         // Try to pair op1I with the best operand of I2.
1080         int MaxTmpScore = 0;
1081         unsigned MaxOpIdx2 = 0;
1082         bool FoundBest = false;
1083         // If I2 is commutative try all combinations.
1084         unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
1085         unsigned ToIdx = isCommutative(I2)
1086                              ? I2->getNumOperands()
1087                              : std::min(I2->getNumOperands(), OpIdx1 + 1);
1088         assert(FromIdx <= ToIdx && "Bad index");
1089         for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
1090           // Skip operands already paired with OpIdx1.
1091           if (Op2Used.count(OpIdx2))
1092             continue;
1093           // Recursively calculate the cost at each level
1094           int TmpScore = getScoreAtLevelRec({I1->getOperand(OpIdx1), Lane1},
1095                                             {I2->getOperand(OpIdx2), Lane2},
1096                                             CurrLevel + 1, MaxLevel);
1097           // Look for the best score.
1098           if (TmpScore > VLOperands::ScoreFail && TmpScore > MaxTmpScore) {
1099             MaxTmpScore = TmpScore;
1100             MaxOpIdx2 = OpIdx2;
1101             FoundBest = true;
1102           }
1103         }
1104         if (FoundBest) {
1105           // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
1106           Op2Used.insert(MaxOpIdx2);
1107           ShallowScoreAtThisLevel += MaxTmpScore;
1108         }
1109       }
1110       return ShallowScoreAtThisLevel;
1111     }
1112 
1113     /// \Returns the look-ahead score, which tells us how much the sub-trees
1114     /// rooted at \p LHS and \p RHS match, the more they match the higher the
1115     /// score. This helps break ties in an informed way when we cannot decide on
1116     /// the order of the operands by just considering the immediate
1117     /// predecessors.
1118     int getLookAheadScore(const std::pair<Value *, int> &LHS,
1119                           const std::pair<Value *, int> &RHS) {
1120       InLookAheadValues.clear();
1121       return getScoreAtLevelRec(LHS, RHS, 1, LookAheadMaxDepth);
1122     }
1123 
1124     // Search all operands in Ops[*][Lane] for the one that matches best
1125     // Ops[OpIdx][LastLane] and return its opreand index.
1126     // If no good match can be found, return None.
1127     Optional<unsigned>
1128     getBestOperand(unsigned OpIdx, int Lane, int LastLane,
1129                    ArrayRef<ReorderingMode> ReorderingModes) {
1130       unsigned NumOperands = getNumOperands();
1131 
1132       // The operand of the previous lane at OpIdx.
1133       Value *OpLastLane = getData(OpIdx, LastLane).V;
1134 
1135       // Our strategy mode for OpIdx.
1136       ReorderingMode RMode = ReorderingModes[OpIdx];
1137 
1138       // The linearized opcode of the operand at OpIdx, Lane.
1139       bool OpIdxAPO = getData(OpIdx, Lane).APO;
1140 
1141       // The best operand index and its score.
1142       // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
1143       // are using the score to differentiate between the two.
1144       struct BestOpData {
1145         Optional<unsigned> Idx = None;
1146         unsigned Score = 0;
1147       } BestOp;
1148 
1149       // Iterate through all unused operands and look for the best.
1150       for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
1151         // Get the operand at Idx and Lane.
1152         OperandData &OpData = getData(Idx, Lane);
1153         Value *Op = OpData.V;
1154         bool OpAPO = OpData.APO;
1155 
1156         // Skip already selected operands.
1157         if (OpData.IsUsed)
1158           continue;
1159 
1160         // Skip if we are trying to move the operand to a position with a
1161         // different opcode in the linearized tree form. This would break the
1162         // semantics.
1163         if (OpAPO != OpIdxAPO)
1164           continue;
1165 
1166         // Look for an operand that matches the current mode.
1167         switch (RMode) {
1168         case ReorderingMode::Load:
1169         case ReorderingMode::Constant:
1170         case ReorderingMode::Opcode: {
1171           bool LeftToRight = Lane > LastLane;
1172           Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
1173           Value *OpRight = (LeftToRight) ? Op : OpLastLane;
1174           unsigned Score =
1175               getLookAheadScore({OpLeft, LastLane}, {OpRight, Lane});
1176           if (Score > BestOp.Score) {
1177             BestOp.Idx = Idx;
1178             BestOp.Score = Score;
1179           }
1180           break;
1181         }
1182         case ReorderingMode::Splat:
1183           if (Op == OpLastLane)
1184             BestOp.Idx = Idx;
1185           break;
1186         case ReorderingMode::Failed:
1187           return None;
1188         }
1189       }
1190 
1191       if (BestOp.Idx) {
1192         getData(BestOp.Idx.getValue(), Lane).IsUsed = true;
1193         return BestOp.Idx;
1194       }
1195       // If we could not find a good match return None.
1196       return None;
1197     }
1198 
1199     /// Helper for reorderOperandVecs. \Returns the lane that we should start
1200     /// reordering from. This is the one which has the least number of operands
1201     /// that can freely move about.
1202     unsigned getBestLaneToStartReordering() const {
1203       unsigned BestLane = 0;
1204       unsigned Min = UINT_MAX;
1205       for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
1206            ++Lane) {
1207         unsigned NumFreeOps = getMaxNumOperandsThatCanBeReordered(Lane);
1208         if (NumFreeOps < Min) {
1209           Min = NumFreeOps;
1210           BestLane = Lane;
1211         }
1212       }
1213       return BestLane;
1214     }
1215 
1216     /// \Returns the maximum number of operands that are allowed to be reordered
1217     /// for \p Lane. This is used as a heuristic for selecting the first lane to
1218     /// start operand reordering.
1219     unsigned getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
1220       unsigned CntTrue = 0;
1221       unsigned NumOperands = getNumOperands();
1222       // Operands with the same APO can be reordered. We therefore need to count
1223       // how many of them we have for each APO, like this: Cnt[APO] = x.
1224       // Since we only have two APOs, namely true and false, we can avoid using
1225       // a map. Instead we can simply count the number of operands that
1226       // correspond to one of them (in this case the 'true' APO), and calculate
1227       // the other by subtracting it from the total number of operands.
1228       for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx)
1229         if (getData(OpIdx, Lane).APO)
1230           ++CntTrue;
1231       unsigned CntFalse = NumOperands - CntTrue;
1232       return std::max(CntTrue, CntFalse);
1233     }
1234 
1235     /// Go through the instructions in VL and append their operands.
1236     void appendOperandsOfVL(ArrayRef<Value *> VL) {
1237       assert(!VL.empty() && "Bad VL");
1238       assert((empty() || VL.size() == getNumLanes()) &&
1239              "Expected same number of lanes");
1240       assert(isa<Instruction>(VL[0]) && "Expected instruction");
1241       unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
1242       OpsVec.resize(NumOperands);
1243       unsigned NumLanes = VL.size();
1244       for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1245         OpsVec[OpIdx].resize(NumLanes);
1246         for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1247           assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
1248           // Our tree has just 3 nodes: the root and two operands.
1249           // It is therefore trivial to get the APO. We only need to check the
1250           // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
1251           // RHS operand. The LHS operand of both add and sub is never attached
1252           // to an inversese operation in the linearized form, therefore its APO
1253           // is false. The RHS is true only if VL[Lane] is an inverse operation.
1254 
1255           // Since operand reordering is performed on groups of commutative
1256           // operations or alternating sequences (e.g., +, -), we can safely
1257           // tell the inverse operations by checking commutativity.
1258           bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
1259           bool APO = (OpIdx == 0) ? false : IsInverseOperation;
1260           OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
1261                                  APO, false};
1262         }
1263       }
1264     }
1265 
1266     /// \returns the number of operands.
1267     unsigned getNumOperands() const { return OpsVec.size(); }
1268 
1269     /// \returns the number of lanes.
1270     unsigned getNumLanes() const { return OpsVec[0].size(); }
1271 
1272     /// \returns the operand value at \p OpIdx and \p Lane.
1273     Value *getValue(unsigned OpIdx, unsigned Lane) const {
1274       return getData(OpIdx, Lane).V;
1275     }
1276 
1277     /// \returns true if the data structure is empty.
1278     bool empty() const { return OpsVec.empty(); }
1279 
1280     /// Clears the data.
1281     void clear() { OpsVec.clear(); }
1282 
1283     /// \Returns true if there are enough operands identical to \p Op to fill
1284     /// the whole vector.
1285     /// Note: This modifies the 'IsUsed' flag, so a cleanUsed() must follow.
1286     bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
1287       bool OpAPO = getData(OpIdx, Lane).APO;
1288       for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
1289         if (Ln == Lane)
1290           continue;
1291         // This is set to true if we found a candidate for broadcast at Lane.
1292         bool FoundCandidate = false;
1293         for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
1294           OperandData &Data = getData(OpI, Ln);
1295           if (Data.APO != OpAPO || Data.IsUsed)
1296             continue;
1297           if (Data.V == Op) {
1298             FoundCandidate = true;
1299             Data.IsUsed = true;
1300             break;
1301           }
1302         }
1303         if (!FoundCandidate)
1304           return false;
1305       }
1306       return true;
1307     }
1308 
1309   public:
1310     /// Initialize with all the operands of the instruction vector \p RootVL.
1311     VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
1312                ScalarEvolution &SE, const BoUpSLP &R)
1313         : DL(DL), SE(SE), R(R) {
1314       // Append all the operands of RootVL.
1315       appendOperandsOfVL(RootVL);
1316     }
1317 
1318     /// \Returns a value vector with the operands across all lanes for the
1319     /// opearnd at \p OpIdx.
1320     ValueList getVL(unsigned OpIdx) const {
1321       ValueList OpVL(OpsVec[OpIdx].size());
1322       assert(OpsVec[OpIdx].size() == getNumLanes() &&
1323              "Expected same num of lanes across all operands");
1324       for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
1325         OpVL[Lane] = OpsVec[OpIdx][Lane].V;
1326       return OpVL;
1327     }
1328 
1329     // Performs operand reordering for 2 or more operands.
1330     // The original operands are in OrigOps[OpIdx][Lane].
1331     // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'.
1332     void reorder() {
1333       unsigned NumOperands = getNumOperands();
1334       unsigned NumLanes = getNumLanes();
1335       // Each operand has its own mode. We are using this mode to help us select
1336       // the instructions for each lane, so that they match best with the ones
1337       // we have selected so far.
1338       SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);
1339 
1340       // This is a greedy single-pass algorithm. We are going over each lane
1341       // once and deciding on the best order right away with no back-tracking.
1342       // However, in order to increase its effectiveness, we start with the lane
1343       // that has operands that can move the least. For example, given the
1344       // following lanes:
1345       //  Lane 0 : A[0] = B[0] + C[0]   // Visited 3rd
1346       //  Lane 1 : A[1] = C[1] - B[1]   // Visited 1st
1347       //  Lane 2 : A[2] = B[2] + C[2]   // Visited 2nd
1348       //  Lane 3 : A[3] = C[3] - B[3]   // Visited 4th
1349       // we will start at Lane 1, since the operands of the subtraction cannot
1350       // be reordered. Then we will visit the rest of the lanes in a circular
1351       // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3.
1352 
1353       // Find the first lane that we will start our search from.
1354       unsigned FirstLane = getBestLaneToStartReordering();
1355 
1356       // Initialize the modes.
1357       for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1358         Value *OpLane0 = getValue(OpIdx, FirstLane);
1359         // Keep track if we have instructions with all the same opcode on one
1360         // side.
1361         if (isa<LoadInst>(OpLane0))
1362           ReorderingModes[OpIdx] = ReorderingMode::Load;
1363         else if (isa<Instruction>(OpLane0)) {
1364           // Check if OpLane0 should be broadcast.
1365           if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
1366             ReorderingModes[OpIdx] = ReorderingMode::Splat;
1367           else
1368             ReorderingModes[OpIdx] = ReorderingMode::Opcode;
1369         }
1370         else if (isa<Constant>(OpLane0))
1371           ReorderingModes[OpIdx] = ReorderingMode::Constant;
1372         else if (isa<Argument>(OpLane0))
1373           // Our best hope is a Splat. It may save some cost in some cases.
1374           ReorderingModes[OpIdx] = ReorderingMode::Splat;
1375         else
1376           // NOTE: This should be unreachable.
1377           ReorderingModes[OpIdx] = ReorderingMode::Failed;
1378       }
1379 
1380       // If the initial strategy fails for any of the operand indexes, then we
1381       // perform reordering again in a second pass. This helps avoid assigning
1382       // high priority to the failed strategy, and should improve reordering for
1383       // the non-failed operand indexes.
1384       for (int Pass = 0; Pass != 2; ++Pass) {
1385         // Skip the second pass if the first pass did not fail.
1386         bool StrategyFailed = false;
1387         // Mark all operand data as free to use.
1388         clearUsed();
1389         // We keep the original operand order for the FirstLane, so reorder the
1390         // rest of the lanes. We are visiting the nodes in a circular fashion,
1391         // using FirstLane as the center point and increasing the radius
1392         // distance.
1393         for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
1394           // Visit the lane on the right and then the lane on the left.
1395           for (int Direction : {+1, -1}) {
1396             int Lane = FirstLane + Direction * Distance;
1397             if (Lane < 0 || Lane >= (int)NumLanes)
1398               continue;
1399             int LastLane = Lane - Direction;
1400             assert(LastLane >= 0 && LastLane < (int)NumLanes &&
1401                    "Out of bounds");
1402             // Look for a good match for each operand.
1403             for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1404               // Search for the operand that matches SortedOps[OpIdx][Lane-1].
1405               Optional<unsigned> BestIdx =
1406                   getBestOperand(OpIdx, Lane, LastLane, ReorderingModes);
1407               // By not selecting a value, we allow the operands that follow to
1408               // select a better matching value. We will get a non-null value in
1409               // the next run of getBestOperand().
1410               if (BestIdx) {
1411                 // Swap the current operand with the one returned by
1412                 // getBestOperand().
1413                 swap(OpIdx, BestIdx.getValue(), Lane);
1414               } else {
1415                 // We failed to find a best operand, set mode to 'Failed'.
1416                 ReorderingModes[OpIdx] = ReorderingMode::Failed;
1417                 // Enable the second pass.
1418                 StrategyFailed = true;
1419               }
1420             }
1421           }
1422         }
1423         // Skip second pass if the strategy did not fail.
1424         if (!StrategyFailed)
1425           break;
1426       }
1427     }
1428 
1429 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1430     LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
1431       switch (RMode) {
1432       case ReorderingMode::Load:
1433         return "Load";
1434       case ReorderingMode::Opcode:
1435         return "Opcode";
1436       case ReorderingMode::Constant:
1437         return "Constant";
1438       case ReorderingMode::Splat:
1439         return "Splat";
1440       case ReorderingMode::Failed:
1441         return "Failed";
1442       }
1443       llvm_unreachable("Unimplemented Reordering Type");
1444     }
1445 
1446     LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
1447                                                    raw_ostream &OS) {
1448       return OS << getModeStr(RMode);
1449     }
1450 
1451     /// Debug print.
1452     LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
1453       printMode(RMode, dbgs());
1454     }
1455 
1456     friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
1457       return printMode(RMode, OS);
1458     }
1459 
1460     LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
1461       const unsigned Indent = 2;
1462       unsigned Cnt = 0;
1463       for (const OperandDataVec &OpDataVec : OpsVec) {
1464         OS << "Operand " << Cnt++ << "\n";
1465         for (const OperandData &OpData : OpDataVec) {
1466           OS.indent(Indent) << "{";
1467           if (Value *V = OpData.V)
1468             OS << *V;
1469           else
1470             OS << "null";
1471           OS << ", APO:" << OpData.APO << "}\n";
1472         }
1473         OS << "\n";
1474       }
1475       return OS;
1476     }
1477 
1478     /// Debug print.
1479     LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
1480 #endif
1481   };
1482 
1483   /// Checks if the instruction is marked for deletion.
1484   bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); }
1485 
1486   /// Marks values operands for later deletion by replacing them with Undefs.
1487   void eraseInstructions(ArrayRef<Value *> AV);
1488 
1489   ~BoUpSLP();
1490 
1491 private:
1492   /// Checks if all users of \p I are the part of the vectorization tree.
1493   bool areAllUsersVectorized(Instruction *I) const;
1494 
1495   /// \returns the cost of the vectorizable entry.
1496   int getEntryCost(TreeEntry *E);
1497 
1498   /// This is the recursive part of buildTree.
1499   void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
1500                      const EdgeInfo &EI);
1501 
1502   /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
1503   /// be vectorized to use the original vector (or aggregate "bitcast" to a
1504   /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
1505   /// returns false, setting \p CurrentOrder to either an empty vector or a
1506   /// non-identity permutation that allows to reuse extract instructions.
1507   bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
1508                        SmallVectorImpl<unsigned> &CurrentOrder) const;
1509 
1510   /// Vectorize a single entry in the tree.
1511   Value *vectorizeTree(TreeEntry *E);
1512 
1513   /// Vectorize a single entry in the tree, starting in \p VL.
1514   Value *vectorizeTree(ArrayRef<Value *> VL);
1515 
1516   /// \returns the scalarization cost for this type. Scalarization in this
1517   /// context means the creation of vectors from a group of scalars.
1518   int getGatherCost(FixedVectorType *Ty,
1519                     const DenseSet<unsigned> &ShuffledIndices) const;
1520 
1521   /// \returns the scalarization cost for this list of values. Assuming that
1522   /// this subtree gets vectorized, we may need to extract the values from the
1523   /// roots. This method calculates the cost of extracting the values.
1524   int getGatherCost(ArrayRef<Value *> VL) const;
1525 
1526   /// Set the Builder insert point to one after the last instruction in
1527   /// the bundle
1528   void setInsertPointAfterBundle(TreeEntry *E);
1529 
1530   /// \returns a vector from a collection of scalars in \p VL.
1531   Value *gather(ArrayRef<Value *> VL);
1532 
1533   /// \returns whether the VectorizableTree is fully vectorizable and will
1534   /// be beneficial even the tree height is tiny.
1535   bool isFullyVectorizableTinyTree() const;
1536 
1537   /// Reorder commutative or alt operands to get better probability of
1538   /// generating vectorized code.
1539   static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
1540                                              SmallVectorImpl<Value *> &Left,
1541                                              SmallVectorImpl<Value *> &Right,
1542                                              const DataLayout &DL,
1543                                              ScalarEvolution &SE,
1544                                              const BoUpSLP &R);
1545   struct TreeEntry {
1546     using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
1547     TreeEntry(VecTreeTy &Container) : Container(Container) {}
1548 
1549     /// \returns true if the scalars in VL are equal to this entry.
1550     bool isSame(ArrayRef<Value *> VL) const {
1551       if (VL.size() == Scalars.size())
1552         return std::equal(VL.begin(), VL.end(), Scalars.begin());
1553       return VL.size() == ReuseShuffleIndices.size() &&
1554              std::equal(
1555                  VL.begin(), VL.end(), ReuseShuffleIndices.begin(),
1556                  [this](Value *V, int Idx) { return V == Scalars[Idx]; });
1557     }
1558 
1559     /// A vector of scalars.
1560     ValueList Scalars;
1561 
1562     /// The Scalars are vectorized into this value. It is initialized to Null.
1563     Value *VectorizedValue = nullptr;
1564 
1565     /// Do we need to gather this sequence or vectorize it
1566     /// (either with vector instruction or with scatter/gather
1567     /// intrinsics for store/load)?
1568     enum EntryState { Vectorize, ScatterVectorize, NeedToGather };
1569     EntryState State;
1570 
1571     /// Does this sequence require some shuffling?
1572     SmallVector<int, 4> ReuseShuffleIndices;
1573 
1574     /// Does this entry require reordering?
1575     SmallVector<unsigned, 4> ReorderIndices;
1576 
1577     /// Points back to the VectorizableTree.
1578     ///
1579     /// Only used for Graphviz right now.  Unfortunately GraphTrait::NodeRef has
1580     /// to be a pointer and needs to be able to initialize the child iterator.
1581     /// Thus we need a reference back to the container to translate the indices
1582     /// to entries.
1583     VecTreeTy &Container;
1584 
1585     /// The TreeEntry index containing the user of this entry.  We can actually
1586     /// have multiple users so the data structure is not truly a tree.
1587     SmallVector<EdgeInfo, 1> UserTreeIndices;
1588 
1589     /// The index of this treeEntry in VectorizableTree.
1590     int Idx = -1;
1591 
1592   private:
1593     /// The operands of each instruction in each lane Operands[op_index][lane].
1594     /// Note: This helps avoid the replication of the code that performs the
1595     /// reordering of operands during buildTree_rec() and vectorizeTree().
1596     SmallVector<ValueList, 2> Operands;
1597 
1598     /// The main/alternate instruction.
1599     Instruction *MainOp = nullptr;
1600     Instruction *AltOp = nullptr;
1601 
1602   public:
1603     /// Set this bundle's \p OpIdx'th operand to \p OpVL.
1604     void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) {
1605       if (Operands.size() < OpIdx + 1)
1606         Operands.resize(OpIdx + 1);
1607       assert(Operands[OpIdx].size() == 0 && "Already resized?");
1608       Operands[OpIdx].resize(Scalars.size());
1609       for (unsigned Lane = 0, E = Scalars.size(); Lane != E; ++Lane)
1610         Operands[OpIdx][Lane] = OpVL[Lane];
1611     }
1612 
1613     /// Set the operands of this bundle in their original order.
1614     void setOperandsInOrder() {
1615       assert(Operands.empty() && "Already initialized?");
1616       auto *I0 = cast<Instruction>(Scalars[0]);
1617       Operands.resize(I0->getNumOperands());
1618       unsigned NumLanes = Scalars.size();
1619       for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands();
1620            OpIdx != NumOperands; ++OpIdx) {
1621         Operands[OpIdx].resize(NumLanes);
1622         for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1623           auto *I = cast<Instruction>(Scalars[Lane]);
1624           assert(I->getNumOperands() == NumOperands &&
1625                  "Expected same number of operands");
1626           Operands[OpIdx][Lane] = I->getOperand(OpIdx);
1627         }
1628       }
1629     }
1630 
1631     /// \returns the \p OpIdx operand of this TreeEntry.
1632     ValueList &getOperand(unsigned OpIdx) {
1633       assert(OpIdx < Operands.size() && "Off bounds");
1634       return Operands[OpIdx];
1635     }
1636 
1637     /// \returns the number of operands.
1638     unsigned getNumOperands() const { return Operands.size(); }
1639 
1640     /// \return the single \p OpIdx operand.
1641     Value *getSingleOperand(unsigned OpIdx) const {
1642       assert(OpIdx < Operands.size() && "Off bounds");
1643       assert(!Operands[OpIdx].empty() && "No operand available");
1644       return Operands[OpIdx][0];
1645     }
1646 
1647     /// Some of the instructions in the list have alternate opcodes.
1648     bool isAltShuffle() const {
1649       return getOpcode() != getAltOpcode();
1650     }
1651 
1652     bool isOpcodeOrAlt(Instruction *I) const {
1653       unsigned CheckedOpcode = I->getOpcode();
1654       return (getOpcode() == CheckedOpcode ||
1655               getAltOpcode() == CheckedOpcode);
1656     }
1657 
1658     /// Chooses the correct key for scheduling data. If \p Op has the same (or
1659     /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is
1660     /// \p OpValue.
1661     Value *isOneOf(Value *Op) const {
1662       auto *I = dyn_cast<Instruction>(Op);
1663       if (I && isOpcodeOrAlt(I))
1664         return Op;
1665       return MainOp;
1666     }
1667 
1668     void setOperations(const InstructionsState &S) {
1669       MainOp = S.MainOp;
1670       AltOp = S.AltOp;
1671     }
1672 
1673     Instruction *getMainOp() const {
1674       return MainOp;
1675     }
1676 
1677     Instruction *getAltOp() const {
1678       return AltOp;
1679     }
1680 
1681     /// The main/alternate opcodes for the list of instructions.
1682     unsigned getOpcode() const {
1683       return MainOp ? MainOp->getOpcode() : 0;
1684     }
1685 
1686     unsigned getAltOpcode() const {
1687       return AltOp ? AltOp->getOpcode() : 0;
1688     }
1689 
1690     /// Update operations state of this entry if reorder occurred.
1691     bool updateStateIfReorder() {
1692       if (ReorderIndices.empty())
1693         return false;
1694       InstructionsState S = getSameOpcode(Scalars, ReorderIndices.front());
1695       setOperations(S);
1696       return true;
1697     }
1698 
1699 #ifndef NDEBUG
1700     /// Debug printer.
1701     LLVM_DUMP_METHOD void dump() const {
1702       dbgs() << Idx << ".\n";
1703       for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) {
1704         dbgs() << "Operand " << OpI << ":\n";
1705         for (const Value *V : Operands[OpI])
1706           dbgs().indent(2) << *V << "\n";
1707       }
1708       dbgs() << "Scalars: \n";
1709       for (Value *V : Scalars)
1710         dbgs().indent(2) << *V << "\n";
1711       dbgs() << "State: ";
1712       switch (State) {
1713       case Vectorize:
1714         dbgs() << "Vectorize\n";
1715         break;
1716       case ScatterVectorize:
1717         dbgs() << "ScatterVectorize\n";
1718         break;
1719       case NeedToGather:
1720         dbgs() << "NeedToGather\n";
1721         break;
1722       }
1723       dbgs() << "MainOp: ";
1724       if (MainOp)
1725         dbgs() << *MainOp << "\n";
1726       else
1727         dbgs() << "NULL\n";
1728       dbgs() << "AltOp: ";
1729       if (AltOp)
1730         dbgs() << *AltOp << "\n";
1731       else
1732         dbgs() << "NULL\n";
1733       dbgs() << "VectorizedValue: ";
1734       if (VectorizedValue)
1735         dbgs() << *VectorizedValue << "\n";
1736       else
1737         dbgs() << "NULL\n";
1738       dbgs() << "ReuseShuffleIndices: ";
1739       if (ReuseShuffleIndices.empty())
1740         dbgs() << "Empty";
1741       else
1742         for (unsigned ReuseIdx : ReuseShuffleIndices)
1743           dbgs() << ReuseIdx << ", ";
1744       dbgs() << "\n";
1745       dbgs() << "ReorderIndices: ";
1746       for (unsigned ReorderIdx : ReorderIndices)
1747         dbgs() << ReorderIdx << ", ";
1748       dbgs() << "\n";
1749       dbgs() << "UserTreeIndices: ";
1750       for (const auto &EInfo : UserTreeIndices)
1751         dbgs() << EInfo << ", ";
1752       dbgs() << "\n";
1753     }
1754 #endif
1755   };
1756 
1757 #ifndef NDEBUG
1758   void dumpTreeCosts(TreeEntry *E, int ReuseShuffleCost, int VecCost,
1759                      int ScalarCost) const {
1760     dbgs() << "SLP: Calculated costs for Tree:\n"; E->dump();
1761     dbgs() << "SLP: Costs:\n";
1762     dbgs() << "SLP:     ReuseShuffleCost = " << ReuseShuffleCost << "\n";
1763     dbgs() << "SLP:     VectorCost = " << VecCost << "\n";
1764     dbgs() << "SLP:     ScalarCost = " << ScalarCost << "\n";
1765     dbgs() << "SLP:     ReuseShuffleCost + VecCost - ScalarCost = " <<
1766                ReuseShuffleCost + VecCost - ScalarCost << "\n";
1767   }
1768 #endif
1769 
1770   /// Create a new VectorizableTree entry.
1771   TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle,
1772                           const InstructionsState &S,
1773                           const EdgeInfo &UserTreeIdx,
1774                           ArrayRef<unsigned> ReuseShuffleIndices = None,
1775                           ArrayRef<unsigned> ReorderIndices = None) {
1776     TreeEntry::EntryState EntryState =
1777         Bundle ? TreeEntry::Vectorize : TreeEntry::NeedToGather;
1778     return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx,
1779                         ReuseShuffleIndices, ReorderIndices);
1780   }
1781 
1782   TreeEntry *newTreeEntry(ArrayRef<Value *> VL,
1783                           TreeEntry::EntryState EntryState,
1784                           Optional<ScheduleData *> Bundle,
1785                           const InstructionsState &S,
1786                           const EdgeInfo &UserTreeIdx,
1787                           ArrayRef<unsigned> ReuseShuffleIndices = None,
1788                           ArrayRef<unsigned> ReorderIndices = None) {
1789     assert(((!Bundle && EntryState == TreeEntry::NeedToGather) ||
1790             (Bundle && EntryState != TreeEntry::NeedToGather)) &&
1791            "Need to vectorize gather entry?");
1792     VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree));
1793     TreeEntry *Last = VectorizableTree.back().get();
1794     Last->Idx = VectorizableTree.size() - 1;
1795     Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
1796     Last->State = EntryState;
1797     Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(),
1798                                      ReuseShuffleIndices.end());
1799     Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end());
1800     Last->setOperations(S);
1801     if (Last->State != TreeEntry::NeedToGather) {
1802       for (Value *V : VL) {
1803         assert(!getTreeEntry(V) && "Scalar already in tree!");
1804         ScalarToTreeEntry[V] = Last;
1805       }
1806       // Update the scheduler bundle to point to this TreeEntry.
1807       unsigned Lane = 0;
1808       for (ScheduleData *BundleMember = Bundle.getValue(); BundleMember;
1809            BundleMember = BundleMember->NextInBundle) {
1810         BundleMember->TE = Last;
1811         BundleMember->Lane = Lane;
1812         ++Lane;
1813       }
1814       assert((!Bundle.getValue() || Lane == VL.size()) &&
1815              "Bundle and VL out of sync");
1816     } else {
1817       MustGather.insert(VL.begin(), VL.end());
1818     }
1819 
1820     if (UserTreeIdx.UserTE)
1821       Last->UserTreeIndices.push_back(UserTreeIdx);
1822 
1823     return Last;
1824   }
1825 
1826   /// -- Vectorization State --
1827   /// Holds all of the tree entries.
1828   TreeEntry::VecTreeTy VectorizableTree;
1829 
1830 #ifndef NDEBUG
1831   /// Debug printer.
1832   LLVM_DUMP_METHOD void dumpVectorizableTree() const {
1833     for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) {
1834       VectorizableTree[Id]->dump();
1835       dbgs() << "\n";
1836     }
1837   }
1838 #endif
1839 
1840   TreeEntry *getTreeEntry(Value *V) {
1841     auto I = ScalarToTreeEntry.find(V);
1842     if (I != ScalarToTreeEntry.end())
1843       return I->second;
1844     return nullptr;
1845   }
1846 
1847   const TreeEntry *getTreeEntry(Value *V) const {
1848     auto I = ScalarToTreeEntry.find(V);
1849     if (I != ScalarToTreeEntry.end())
1850       return I->second;
1851     return nullptr;
1852   }
1853 
1854   /// Maps a specific scalar to its tree entry.
1855   SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry;
1856 
1857   /// Maps a value to the proposed vectorizable size.
1858   SmallDenseMap<Value *, unsigned> InstrElementSize;
1859 
1860   /// A list of scalars that we found that we need to keep as scalars.
1861   ValueSet MustGather;
1862 
1863   /// This POD struct describes one external user in the vectorized tree.
1864   struct ExternalUser {
1865     ExternalUser(Value *S, llvm::User *U, int L)
1866         : Scalar(S), User(U), Lane(L) {}
1867 
1868     // Which scalar in our function.
1869     Value *Scalar;
1870 
1871     // Which user that uses the scalar.
1872     llvm::User *User;
1873 
1874     // Which lane does the scalar belong to.
1875     int Lane;
1876   };
1877   using UserList = SmallVector<ExternalUser, 16>;
1878 
1879   /// Checks if two instructions may access the same memory.
1880   ///
1881   /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
1882   /// is invariant in the calling loop.
1883   bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
1884                  Instruction *Inst2) {
1885     // First check if the result is already in the cache.
1886     AliasCacheKey key = std::make_pair(Inst1, Inst2);
1887     Optional<bool> &result = AliasCache[key];
1888     if (result.hasValue()) {
1889       return result.getValue();
1890     }
1891     MemoryLocation Loc2 = getLocation(Inst2, AA);
1892     bool aliased = true;
1893     if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
1894       // Do the alias check.
1895       aliased = AA->alias(Loc1, Loc2);
1896     }
1897     // Store the result in the cache.
1898     result = aliased;
1899     return aliased;
1900   }
1901 
1902   using AliasCacheKey = std::pair<Instruction *, Instruction *>;
1903 
1904   /// Cache for alias results.
1905   /// TODO: consider moving this to the AliasAnalysis itself.
1906   DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
1907 
1908   /// Removes an instruction from its block and eventually deletes it.
1909   /// It's like Instruction::eraseFromParent() except that the actual deletion
1910   /// is delayed until BoUpSLP is destructed.
1911   /// This is required to ensure that there are no incorrect collisions in the
1912   /// AliasCache, which can happen if a new instruction is allocated at the
1913   /// same address as a previously deleted instruction.
1914   void eraseInstruction(Instruction *I, bool ReplaceOpsWithUndef = false) {
1915     auto It = DeletedInstructions.try_emplace(I, ReplaceOpsWithUndef).first;
1916     It->getSecond() = It->getSecond() && ReplaceOpsWithUndef;
1917   }
1918 
1919   /// Temporary store for deleted instructions. Instructions will be deleted
1920   /// eventually when the BoUpSLP is destructed.
1921   DenseMap<Instruction *, bool> DeletedInstructions;
1922 
1923   /// A list of values that need to extracted out of the tree.
1924   /// This list holds pairs of (Internal Scalar : External User). External User
1925   /// can be nullptr, it means that this Internal Scalar will be used later,
1926   /// after vectorization.
1927   UserList ExternalUses;
1928 
1929   /// Values used only by @llvm.assume calls.
1930   SmallPtrSet<const Value *, 32> EphValues;
1931 
1932   /// Holds all of the instructions that we gathered.
1933   SetVector<Instruction *> GatherSeq;
1934 
1935   /// A list of blocks that we are going to CSE.
1936   SetVector<BasicBlock *> CSEBlocks;
1937 
1938   /// Contains all scheduling relevant data for an instruction.
1939   /// A ScheduleData either represents a single instruction or a member of an
1940   /// instruction bundle (= a group of instructions which is combined into a
1941   /// vector instruction).
1942   struct ScheduleData {
1943     // The initial value for the dependency counters. It means that the
1944     // dependencies are not calculated yet.
1945     enum { InvalidDeps = -1 };
1946 
1947     ScheduleData() = default;
1948 
1949     void init(int BlockSchedulingRegionID, Value *OpVal) {
1950       FirstInBundle = this;
1951       NextInBundle = nullptr;
1952       NextLoadStore = nullptr;
1953       IsScheduled = false;
1954       SchedulingRegionID = BlockSchedulingRegionID;
1955       UnscheduledDepsInBundle = UnscheduledDeps;
1956       clearDependencies();
1957       OpValue = OpVal;
1958       TE = nullptr;
1959       Lane = -1;
1960     }
1961 
1962     /// Returns true if the dependency information has been calculated.
1963     bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
1964 
1965     /// Returns true for single instructions and for bundle representatives
1966     /// (= the head of a bundle).
1967     bool isSchedulingEntity() const { return FirstInBundle == this; }
1968 
1969     /// Returns true if it represents an instruction bundle and not only a
1970     /// single instruction.
1971     bool isPartOfBundle() const {
1972       return NextInBundle != nullptr || FirstInBundle != this;
1973     }
1974 
1975     /// Returns true if it is ready for scheduling, i.e. it has no more
1976     /// unscheduled depending instructions/bundles.
1977     bool isReady() const {
1978       assert(isSchedulingEntity() &&
1979              "can't consider non-scheduling entity for ready list");
1980       return UnscheduledDepsInBundle == 0 && !IsScheduled;
1981     }
1982 
1983     /// Modifies the number of unscheduled dependencies, also updating it for
1984     /// the whole bundle.
1985     int incrementUnscheduledDeps(int Incr) {
1986       UnscheduledDeps += Incr;
1987       return FirstInBundle->UnscheduledDepsInBundle += Incr;
1988     }
1989 
1990     /// Sets the number of unscheduled dependencies to the number of
1991     /// dependencies.
1992     void resetUnscheduledDeps() {
1993       incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
1994     }
1995 
1996     /// Clears all dependency information.
1997     void clearDependencies() {
1998       Dependencies = InvalidDeps;
1999       resetUnscheduledDeps();
2000       MemoryDependencies.clear();
2001     }
2002 
2003     void dump(raw_ostream &os) const {
2004       if (!isSchedulingEntity()) {
2005         os << "/ " << *Inst;
2006       } else if (NextInBundle) {
2007         os << '[' << *Inst;
2008         ScheduleData *SD = NextInBundle;
2009         while (SD) {
2010           os << ';' << *SD->Inst;
2011           SD = SD->NextInBundle;
2012         }
2013         os << ']';
2014       } else {
2015         os << *Inst;
2016       }
2017     }
2018 
2019     Instruction *Inst = nullptr;
2020 
2021     /// Points to the head in an instruction bundle (and always to this for
2022     /// single instructions).
2023     ScheduleData *FirstInBundle = nullptr;
2024 
2025     /// Single linked list of all instructions in a bundle. Null if it is a
2026     /// single instruction.
2027     ScheduleData *NextInBundle = nullptr;
2028 
2029     /// Single linked list of all memory instructions (e.g. load, store, call)
2030     /// in the block - until the end of the scheduling region.
2031     ScheduleData *NextLoadStore = nullptr;
2032 
2033     /// The dependent memory instructions.
2034     /// This list is derived on demand in calculateDependencies().
2035     SmallVector<ScheduleData *, 4> MemoryDependencies;
2036 
2037     /// This ScheduleData is in the current scheduling region if this matches
2038     /// the current SchedulingRegionID of BlockScheduling.
2039     int SchedulingRegionID = 0;
2040 
2041     /// Used for getting a "good" final ordering of instructions.
2042     int SchedulingPriority = 0;
2043 
2044     /// The number of dependencies. Constitutes of the number of users of the
2045     /// instruction plus the number of dependent memory instructions (if any).
2046     /// This value is calculated on demand.
2047     /// If InvalidDeps, the number of dependencies is not calculated yet.
2048     int Dependencies = InvalidDeps;
2049 
2050     /// The number of dependencies minus the number of dependencies of scheduled
2051     /// instructions. As soon as this is zero, the instruction/bundle gets ready
2052     /// for scheduling.
2053     /// Note that this is negative as long as Dependencies is not calculated.
2054     int UnscheduledDeps = InvalidDeps;
2055 
2056     /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for
2057     /// single instructions.
2058     int UnscheduledDepsInBundle = InvalidDeps;
2059 
2060     /// True if this instruction is scheduled (or considered as scheduled in the
2061     /// dry-run).
2062     bool IsScheduled = false;
2063 
2064     /// Opcode of the current instruction in the schedule data.
2065     Value *OpValue = nullptr;
2066 
2067     /// The TreeEntry that this instruction corresponds to.
2068     TreeEntry *TE = nullptr;
2069 
2070     /// The lane of this node in the TreeEntry.
2071     int Lane = -1;
2072   };
2073 
2074 #ifndef NDEBUG
2075   friend inline raw_ostream &operator<<(raw_ostream &os,
2076                                         const BoUpSLP::ScheduleData &SD) {
2077     SD.dump(os);
2078     return os;
2079   }
2080 #endif
2081 
2082   friend struct GraphTraits<BoUpSLP *>;
2083   friend struct DOTGraphTraits<BoUpSLP *>;
2084 
2085   /// Contains all scheduling data for a basic block.
2086   struct BlockScheduling {
2087     BlockScheduling(BasicBlock *BB)
2088         : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}
2089 
2090     void clear() {
2091       ReadyInsts.clear();
2092       ScheduleStart = nullptr;
2093       ScheduleEnd = nullptr;
2094       FirstLoadStoreInRegion = nullptr;
2095       LastLoadStoreInRegion = nullptr;
2096 
2097       // Reduce the maximum schedule region size by the size of the
2098       // previous scheduling run.
2099       ScheduleRegionSizeLimit -= ScheduleRegionSize;
2100       if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
2101         ScheduleRegionSizeLimit = MinScheduleRegionSize;
2102       ScheduleRegionSize = 0;
2103 
2104       // Make a new scheduling region, i.e. all existing ScheduleData is not
2105       // in the new region yet.
2106       ++SchedulingRegionID;
2107     }
2108 
2109     ScheduleData *getScheduleData(Value *V) {
2110       ScheduleData *SD = ScheduleDataMap[V];
2111       if (SD && SD->SchedulingRegionID == SchedulingRegionID)
2112         return SD;
2113       return nullptr;
2114     }
2115 
2116     ScheduleData *getScheduleData(Value *V, Value *Key) {
2117       if (V == Key)
2118         return getScheduleData(V);
2119       auto I = ExtraScheduleDataMap.find(V);
2120       if (I != ExtraScheduleDataMap.end()) {
2121         ScheduleData *SD = I->second[Key];
2122         if (SD && SD->SchedulingRegionID == SchedulingRegionID)
2123           return SD;
2124       }
2125       return nullptr;
2126     }
2127 
2128     bool isInSchedulingRegion(ScheduleData *SD) const {
2129       return SD->SchedulingRegionID == SchedulingRegionID;
2130     }
2131 
2132     /// Marks an instruction as scheduled and puts all dependent ready
2133     /// instructions into the ready-list.
2134     template <typename ReadyListType>
2135     void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
2136       SD->IsScheduled = true;
2137       LLVM_DEBUG(dbgs() << "SLP:   schedule " << *SD << "\n");
2138 
2139       ScheduleData *BundleMember = SD;
2140       while (BundleMember) {
2141         if (BundleMember->Inst != BundleMember->OpValue) {
2142           BundleMember = BundleMember->NextInBundle;
2143           continue;
2144         }
2145         // Handle the def-use chain dependencies.
2146 
2147         // Decrement the unscheduled counter and insert to ready list if ready.
2148         auto &&DecrUnsched = [this, &ReadyList](Instruction *I) {
2149           doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
2150             if (OpDef && OpDef->hasValidDependencies() &&
2151                 OpDef->incrementUnscheduledDeps(-1) == 0) {
2152               // There are no more unscheduled dependencies after
2153               // decrementing, so we can put the dependent instruction
2154               // into the ready list.
2155               ScheduleData *DepBundle = OpDef->FirstInBundle;
2156               assert(!DepBundle->IsScheduled &&
2157                      "already scheduled bundle gets ready");
2158               ReadyList.insert(DepBundle);
2159               LLVM_DEBUG(dbgs()
2160                          << "SLP:    gets ready (def): " << *DepBundle << "\n");
2161             }
2162           });
2163         };
2164 
2165         // If BundleMember is a vector bundle, its operands may have been
2166         // reordered duiring buildTree(). We therefore need to get its operands
2167         // through the TreeEntry.
2168         if (TreeEntry *TE = BundleMember->TE) {
2169           int Lane = BundleMember->Lane;
2170           assert(Lane >= 0 && "Lane not set");
2171 
2172           // Since vectorization tree is being built recursively this assertion
2173           // ensures that the tree entry has all operands set before reaching
2174           // this code. Couple of exceptions known at the moment are extracts
2175           // where their second (immediate) operand is not added. Since
2176           // immediates do not affect scheduler behavior this is considered
2177           // okay.
2178           auto *In = TE->getMainOp();
2179           assert(In &&
2180                  (isa<ExtractValueInst>(In) || isa<ExtractElementInst>(In) ||
2181                   In->getNumOperands() == TE->getNumOperands()) &&
2182                  "Missed TreeEntry operands?");
2183           (void)In; // fake use to avoid build failure when assertions disabled
2184 
2185           for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands();
2186                OpIdx != NumOperands; ++OpIdx)
2187             if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane]))
2188               DecrUnsched(I);
2189         } else {
2190           // If BundleMember is a stand-alone instruction, no operand reordering
2191           // has taken place, so we directly access its operands.
2192           for (Use &U : BundleMember->Inst->operands())
2193             if (auto *I = dyn_cast<Instruction>(U.get()))
2194               DecrUnsched(I);
2195         }
2196         // Handle the memory dependencies.
2197         for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
2198           if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
2199             // There are no more unscheduled dependencies after decrementing,
2200             // so we can put the dependent instruction into the ready list.
2201             ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
2202             assert(!DepBundle->IsScheduled &&
2203                    "already scheduled bundle gets ready");
2204             ReadyList.insert(DepBundle);
2205             LLVM_DEBUG(dbgs()
2206                        << "SLP:    gets ready (mem): " << *DepBundle << "\n");
2207           }
2208         }
2209         BundleMember = BundleMember->NextInBundle;
2210       }
2211     }
2212 
2213     void doForAllOpcodes(Value *V,
2214                          function_ref<void(ScheduleData *SD)> Action) {
2215       if (ScheduleData *SD = getScheduleData(V))
2216         Action(SD);
2217       auto I = ExtraScheduleDataMap.find(V);
2218       if (I != ExtraScheduleDataMap.end())
2219         for (auto &P : I->second)
2220           if (P.second->SchedulingRegionID == SchedulingRegionID)
2221             Action(P.second);
2222     }
2223 
2224     /// Put all instructions into the ReadyList which are ready for scheduling.
2225     template <typename ReadyListType>
2226     void initialFillReadyList(ReadyListType &ReadyList) {
2227       for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
2228         doForAllOpcodes(I, [&](ScheduleData *SD) {
2229           if (SD->isSchedulingEntity() && SD->isReady()) {
2230             ReadyList.insert(SD);
2231             LLVM_DEBUG(dbgs()
2232                        << "SLP:    initially in ready list: " << *I << "\n");
2233           }
2234         });
2235       }
2236     }
2237 
2238     /// Checks if a bundle of instructions can be scheduled, i.e. has no
2239     /// cyclic dependencies. This is only a dry-run, no instructions are
2240     /// actually moved at this stage.
2241     /// \returns the scheduling bundle. The returned Optional value is non-None
2242     /// if \p VL is allowed to be scheduled.
2243     Optional<ScheduleData *>
2244     tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
2245                       const InstructionsState &S);
2246 
2247     /// Un-bundles a group of instructions.
2248     void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);
2249 
2250     /// Allocates schedule data chunk.
2251     ScheduleData *allocateScheduleDataChunks();
2252 
2253     /// Extends the scheduling region so that V is inside the region.
2254     /// \returns true if the region size is within the limit.
2255     bool extendSchedulingRegion(Value *V, const InstructionsState &S);
2256 
2257     /// Initialize the ScheduleData structures for new instructions in the
2258     /// scheduling region.
2259     void initScheduleData(Instruction *FromI, Instruction *ToI,
2260                           ScheduleData *PrevLoadStore,
2261                           ScheduleData *NextLoadStore);
2262 
2263     /// Updates the dependency information of a bundle and of all instructions/
2264     /// bundles which depend on the original bundle.
2265     void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
2266                                BoUpSLP *SLP);
2267 
2268     /// Sets all instruction in the scheduling region to un-scheduled.
2269     void resetSchedule();
2270 
2271     BasicBlock *BB;
2272 
2273     /// Simple memory allocation for ScheduleData.
2274     std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
2275 
2276     /// The size of a ScheduleData array in ScheduleDataChunks.
2277     int ChunkSize;
2278 
2279     /// The allocator position in the current chunk, which is the last entry
2280     /// of ScheduleDataChunks.
2281     int ChunkPos;
2282 
2283     /// Attaches ScheduleData to Instruction.
2284     /// Note that the mapping survives during all vectorization iterations, i.e.
2285     /// ScheduleData structures are recycled.
2286     DenseMap<Value *, ScheduleData *> ScheduleDataMap;
2287 
2288     /// Attaches ScheduleData to Instruction with the leading key.
2289     DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>>
2290         ExtraScheduleDataMap;
2291 
2292     struct ReadyList : SmallVector<ScheduleData *, 8> {
2293       void insert(ScheduleData *SD) { push_back(SD); }
2294     };
2295 
2296     /// The ready-list for scheduling (only used for the dry-run).
2297     ReadyList ReadyInsts;
2298 
2299     /// The first instruction of the scheduling region.
2300     Instruction *ScheduleStart = nullptr;
2301 
2302     /// The first instruction _after_ the scheduling region.
2303     Instruction *ScheduleEnd = nullptr;
2304 
2305     /// The first memory accessing instruction in the scheduling region
2306     /// (can be null).
2307     ScheduleData *FirstLoadStoreInRegion = nullptr;
2308 
2309     /// The last memory accessing instruction in the scheduling region
2310     /// (can be null).
2311     ScheduleData *LastLoadStoreInRegion = nullptr;
2312 
2313     /// The current size of the scheduling region.
2314     int ScheduleRegionSize = 0;
2315 
2316     /// The maximum size allowed for the scheduling region.
2317     int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget;
2318 
2319     /// The ID of the scheduling region. For a new vectorization iteration this
2320     /// is incremented which "removes" all ScheduleData from the region.
2321     // Make sure that the initial SchedulingRegionID is greater than the
2322     // initial SchedulingRegionID in ScheduleData (which is 0).
2323     int SchedulingRegionID = 1;
2324   };
2325 
2326   /// Attaches the BlockScheduling structures to basic blocks.
2327   MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
2328 
2329   /// Performs the "real" scheduling. Done before vectorization is actually
2330   /// performed in a basic block.
2331   void scheduleBlock(BlockScheduling *BS);
2332 
2333   /// List of users to ignore during scheduling and that don't need extracting.
2334   ArrayRef<Value *> UserIgnoreList;
2335 
2336   /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of
2337   /// sorted SmallVectors of unsigned.
2338   struct OrdersTypeDenseMapInfo {
2339     static OrdersType getEmptyKey() {
2340       OrdersType V;
2341       V.push_back(~1U);
2342       return V;
2343     }
2344 
2345     static OrdersType getTombstoneKey() {
2346       OrdersType V;
2347       V.push_back(~2U);
2348       return V;
2349     }
2350 
2351     static unsigned getHashValue(const OrdersType &V) {
2352       return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
2353     }
2354 
2355     static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) {
2356       return LHS == RHS;
2357     }
2358   };
2359 
2360   /// Contains orders of operations along with the number of bundles that have
2361   /// operations in this order. It stores only those orders that require
2362   /// reordering, if reordering is not required it is counted using \a
2363   /// NumOpsWantToKeepOriginalOrder.
2364   DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo> NumOpsWantToKeepOrder;
2365   /// Number of bundles that do not require reordering.
2366   unsigned NumOpsWantToKeepOriginalOrder = 0;
2367 
2368   // Analysis and block reference.
2369   Function *F;
2370   ScalarEvolution *SE;
2371   TargetTransformInfo *TTI;
2372   TargetLibraryInfo *TLI;
2373   AAResults *AA;
2374   LoopInfo *LI;
2375   DominatorTree *DT;
2376   AssumptionCache *AC;
2377   DemandedBits *DB;
2378   const DataLayout *DL;
2379   OptimizationRemarkEmitter *ORE;
2380 
2381   unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
2382   unsigned MinVecRegSize; // Set by cl::opt (default: 128).
2383 
2384   /// Instruction builder to construct the vectorized tree.
2385   IRBuilder<> Builder;
2386 
2387   /// A map of scalar integer values to the smallest bit width with which they
2388   /// can legally be represented. The values map to (width, signed) pairs,
2389   /// where "width" indicates the minimum bit width and "signed" is True if the
2390   /// value must be signed-extended, rather than zero-extended, back to its
2391   /// original width.
2392   MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
2393 };
2394 
2395 } // end namespace slpvectorizer
2396 
2397 template <> struct GraphTraits<BoUpSLP *> {
2398   using TreeEntry = BoUpSLP::TreeEntry;
2399 
2400   /// NodeRef has to be a pointer per the GraphWriter.
2401   using NodeRef = TreeEntry *;
2402 
2403   using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy;
2404 
2405   /// Add the VectorizableTree to the index iterator to be able to return
2406   /// TreeEntry pointers.
2407   struct ChildIteratorType
2408       : public iterator_adaptor_base<
2409             ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> {
2410     ContainerTy &VectorizableTree;
2411 
2412     ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W,
2413                       ContainerTy &VT)
2414         : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}
2415 
2416     NodeRef operator*() { return I->UserTE; }
2417   };
2418 
2419   static NodeRef getEntryNode(BoUpSLP &R) {
2420     return R.VectorizableTree[0].get();
2421   }
2422 
2423   static ChildIteratorType child_begin(NodeRef N) {
2424     return {N->UserTreeIndices.begin(), N->Container};
2425   }
2426 
2427   static ChildIteratorType child_end(NodeRef N) {
2428     return {N->UserTreeIndices.end(), N->Container};
2429   }
2430 
2431   /// For the node iterator we just need to turn the TreeEntry iterator into a
2432   /// TreeEntry* iterator so that it dereferences to NodeRef.
2433   class nodes_iterator {
2434     using ItTy = ContainerTy::iterator;
2435     ItTy It;
2436 
2437   public:
2438     nodes_iterator(const ItTy &It2) : It(It2) {}
2439     NodeRef operator*() { return It->get(); }
2440     nodes_iterator operator++() {
2441       ++It;
2442       return *this;
2443     }
2444     bool operator!=(const nodes_iterator &N2) const { return N2.It != It; }
2445   };
2446 
2447   static nodes_iterator nodes_begin(BoUpSLP *R) {
2448     return nodes_iterator(R->VectorizableTree.begin());
2449   }
2450 
2451   static nodes_iterator nodes_end(BoUpSLP *R) {
2452     return nodes_iterator(R->VectorizableTree.end());
2453   }
2454 
2455   static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
2456 };
2457 
2458 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
2459   using TreeEntry = BoUpSLP::TreeEntry;
2460 
2461   DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
2462 
2463   std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
2464     std::string Str;
2465     raw_string_ostream OS(Str);
2466     if (isSplat(Entry->Scalars)) {
2467       OS << "<splat> " << *Entry->Scalars[0];
2468       return Str;
2469     }
2470     for (auto V : Entry->Scalars) {
2471       OS << *V;
2472       if (std::any_of(
2473               R->ExternalUses.begin(), R->ExternalUses.end(),
2474               [&](const BoUpSLP::ExternalUser &EU) { return EU.Scalar == V; }))
2475         OS << " <extract>";
2476       OS << "\n";
2477     }
2478     return Str;
2479   }
2480 
2481   static std::string getNodeAttributes(const TreeEntry *Entry,
2482                                        const BoUpSLP *) {
2483     if (Entry->State == TreeEntry::NeedToGather)
2484       return "color=red";
2485     return "";
2486   }
2487 };
2488 
2489 } // end namespace llvm
2490 
2491 BoUpSLP::~BoUpSLP() {
2492   for (const auto &Pair : DeletedInstructions) {
2493     // Replace operands of ignored instructions with Undefs in case if they were
2494     // marked for deletion.
2495     if (Pair.getSecond()) {
2496       Value *Undef = UndefValue::get(Pair.getFirst()->getType());
2497       Pair.getFirst()->replaceAllUsesWith(Undef);
2498     }
2499     Pair.getFirst()->dropAllReferences();
2500   }
2501   for (const auto &Pair : DeletedInstructions) {
2502     assert(Pair.getFirst()->use_empty() &&
2503            "trying to erase instruction with users.");
2504     Pair.getFirst()->eraseFromParent();
2505   }
2506   assert(!verifyFunction(*F, &dbgs()));
2507 }
2508 
2509 void BoUpSLP::eraseInstructions(ArrayRef<Value *> AV) {
2510   for (auto *V : AV) {
2511     if (auto *I = dyn_cast<Instruction>(V))
2512       eraseInstruction(I, /*ReplaceOpsWithUndef=*/true);
2513   };
2514 }
2515 
2516 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
2517                         ArrayRef<Value *> UserIgnoreLst) {
2518   ExtraValueToDebugLocsMap ExternallyUsedValues;
2519   buildTree(Roots, ExternallyUsedValues, UserIgnoreLst);
2520 }
2521 
2522 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
2523                         ExtraValueToDebugLocsMap &ExternallyUsedValues,
2524                         ArrayRef<Value *> UserIgnoreLst) {
2525   deleteTree();
2526   UserIgnoreList = UserIgnoreLst;
2527   if (!allSameType(Roots))
2528     return;
2529   buildTree_rec(Roots, 0, EdgeInfo());
2530 
2531   // Collect the values that we need to extract from the tree.
2532   for (auto &TEPtr : VectorizableTree) {
2533     TreeEntry *Entry = TEPtr.get();
2534 
2535     // No need to handle users of gathered values.
2536     if (Entry->State == TreeEntry::NeedToGather)
2537       continue;
2538 
2539     // For each lane:
2540     for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
2541       Value *Scalar = Entry->Scalars[Lane];
2542       int FoundLane = Lane;
2543       if (!Entry->ReuseShuffleIndices.empty()) {
2544         FoundLane =
2545             std::distance(Entry->ReuseShuffleIndices.begin(),
2546                           llvm::find(Entry->ReuseShuffleIndices, FoundLane));
2547       }
2548 
2549       // Check if the scalar is externally used as an extra arg.
2550       auto ExtI = ExternallyUsedValues.find(Scalar);
2551       if (ExtI != ExternallyUsedValues.end()) {
2552         LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane "
2553                           << Lane << " from " << *Scalar << ".\n");
2554         ExternalUses.emplace_back(Scalar, nullptr, FoundLane);
2555       }
2556       for (User *U : Scalar->users()) {
2557         LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
2558 
2559         Instruction *UserInst = dyn_cast<Instruction>(U);
2560         if (!UserInst)
2561           continue;
2562 
2563         // Skip in-tree scalars that become vectors
2564         if (TreeEntry *UseEntry = getTreeEntry(U)) {
2565           Value *UseScalar = UseEntry->Scalars[0];
2566           // Some in-tree scalars will remain as scalar in vectorized
2567           // instructions. If that is the case, the one in Lane 0 will
2568           // be used.
2569           if (UseScalar != U ||
2570               !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
2571             LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
2572                               << ".\n");
2573             assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state");
2574             continue;
2575           }
2576         }
2577 
2578         // Ignore users in the user ignore list.
2579         if (is_contained(UserIgnoreList, UserInst))
2580           continue;
2581 
2582         LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane "
2583                           << Lane << " from " << *Scalar << ".\n");
2584         ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane));
2585       }
2586     }
2587   }
2588 }
2589 
2590 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
2591                             const EdgeInfo &UserTreeIdx) {
2592   assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
2593 
2594   InstructionsState S = getSameOpcode(VL);
2595   if (Depth == RecursionMaxDepth) {
2596     LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
2597     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2598     return;
2599   }
2600 
2601   // Don't handle vectors.
2602   if (S.OpValue->getType()->isVectorTy()) {
2603     LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
2604     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2605     return;
2606   }
2607 
2608   if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
2609     if (SI->getValueOperand()->getType()->isVectorTy()) {
2610       LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
2611       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2612       return;
2613     }
2614 
2615   // If all of the operands are identical or constant we have a simple solution.
2616   if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode()) {
2617     LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
2618     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2619     return;
2620   }
2621 
2622   // We now know that this is a vector of instructions of the same type from
2623   // the same block.
2624 
2625   // Don't vectorize ephemeral values.
2626   for (Value *V : VL) {
2627     if (EphValues.count(V)) {
2628       LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
2629                         << ") is ephemeral.\n");
2630       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2631       return;
2632     }
2633   }
2634 
2635   // Check if this is a duplicate of another entry.
2636   if (TreeEntry *E = getTreeEntry(S.OpValue)) {
2637     LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n");
2638     if (!E->isSame(VL)) {
2639       LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
2640       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2641       return;
2642     }
2643     // Record the reuse of the tree node.  FIXME, currently this is only used to
2644     // properly draw the graph rather than for the actual vectorization.
2645     E->UserTreeIndices.push_back(UserTreeIdx);
2646     LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue
2647                       << ".\n");
2648     return;
2649   }
2650 
2651   // Check that none of the instructions in the bundle are already in the tree.
2652   for (Value *V : VL) {
2653     auto *I = dyn_cast<Instruction>(V);
2654     if (!I)
2655       continue;
2656     if (getTreeEntry(I)) {
2657       LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
2658                         << ") is already in tree.\n");
2659       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2660       return;
2661     }
2662   }
2663 
2664   // If any of the scalars is marked as a value that needs to stay scalar, then
2665   // we need to gather the scalars.
2666   // The reduction nodes (stored in UserIgnoreList) also should stay scalar.
2667   for (Value *V : VL) {
2668     if (MustGather.count(V) || is_contained(UserIgnoreList, V)) {
2669       LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
2670       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2671       return;
2672     }
2673   }
2674 
2675   // Check that all of the users of the scalars that we want to vectorize are
2676   // schedulable.
2677   auto *VL0 = cast<Instruction>(S.OpValue);
2678   BasicBlock *BB = VL0->getParent();
2679 
2680   if (!DT->isReachableFromEntry(BB)) {
2681     // Don't go into unreachable blocks. They may contain instructions with
2682     // dependency cycles which confuse the final scheduling.
2683     LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
2684     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2685     return;
2686   }
2687 
2688   // Check that every instruction appears once in this bundle.
2689   SmallVector<unsigned, 4> ReuseShuffleIndicies;
2690   SmallVector<Value *, 4> UniqueValues;
2691   DenseMap<Value *, unsigned> UniquePositions;
2692   for (Value *V : VL) {
2693     auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
2694     ReuseShuffleIndicies.emplace_back(Res.first->second);
2695     if (Res.second)
2696       UniqueValues.emplace_back(V);
2697   }
2698   size_t NumUniqueScalarValues = UniqueValues.size();
2699   if (NumUniqueScalarValues == VL.size()) {
2700     ReuseShuffleIndicies.clear();
2701   } else {
2702     LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n");
2703     if (NumUniqueScalarValues <= 1 ||
2704         !llvm::isPowerOf2_32(NumUniqueScalarValues)) {
2705       LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
2706       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2707       return;
2708     }
2709     VL = UniqueValues;
2710   }
2711 
2712   auto &BSRef = BlocksSchedules[BB];
2713   if (!BSRef)
2714     BSRef = std::make_unique<BlockScheduling>(BB);
2715 
2716   BlockScheduling &BS = *BSRef.get();
2717 
2718   Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S);
2719   if (!Bundle) {
2720     LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
2721     assert((!BS.getScheduleData(VL0) ||
2722             !BS.getScheduleData(VL0)->isPartOfBundle()) &&
2723            "tryScheduleBundle should cancelScheduling on failure");
2724     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2725                  ReuseShuffleIndicies);
2726     return;
2727   }
2728   LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
2729 
2730   unsigned ShuffleOrOp = S.isAltShuffle() ?
2731                 (unsigned) Instruction::ShuffleVector : S.getOpcode();
2732   switch (ShuffleOrOp) {
2733     case Instruction::PHI: {
2734       auto *PH = cast<PHINode>(VL0);
2735 
2736       // Check for terminator values (e.g. invoke).
2737       for (Value *V : VL)
2738         for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) {
2739           Instruction *Term = dyn_cast<Instruction>(
2740               cast<PHINode>(V)->getIncomingValueForBlock(
2741                   PH->getIncomingBlock(I)));
2742           if (Term && Term->isTerminator()) {
2743             LLVM_DEBUG(dbgs()
2744                        << "SLP: Need to swizzle PHINodes (terminator use).\n");
2745             BS.cancelScheduling(VL, VL0);
2746             newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2747                          ReuseShuffleIndicies);
2748             return;
2749           }
2750         }
2751 
2752       TreeEntry *TE =
2753           newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies);
2754       LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
2755 
2756       // Keeps the reordered operands to avoid code duplication.
2757       SmallVector<ValueList, 2> OperandsVec;
2758       for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) {
2759         ValueList Operands;
2760         // Prepare the operand vector.
2761         for (Value *V : VL)
2762           Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(
2763               PH->getIncomingBlock(I)));
2764         TE->setOperand(I, Operands);
2765         OperandsVec.push_back(Operands);
2766       }
2767       for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx)
2768         buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx});
2769       return;
2770     }
2771     case Instruction::ExtractValue:
2772     case Instruction::ExtractElement: {
2773       OrdersType CurrentOrder;
2774       bool Reuse = canReuseExtract(VL, VL0, CurrentOrder);
2775       if (Reuse) {
2776         LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
2777         ++NumOpsWantToKeepOriginalOrder;
2778         newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2779                      ReuseShuffleIndicies);
2780         // This is a special case, as it does not gather, but at the same time
2781         // we are not extending buildTree_rec() towards the operands.
2782         ValueList Op0;
2783         Op0.assign(VL.size(), VL0->getOperand(0));
2784         VectorizableTree.back()->setOperand(0, Op0);
2785         return;
2786       }
2787       if (!CurrentOrder.empty()) {
2788         LLVM_DEBUG({
2789           dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
2790                     "with order";
2791           for (unsigned Idx : CurrentOrder)
2792             dbgs() << " " << Idx;
2793           dbgs() << "\n";
2794         });
2795         // Insert new order with initial value 0, if it does not exist,
2796         // otherwise return the iterator to the existing one.
2797         newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2798                      ReuseShuffleIndicies, CurrentOrder);
2799         findRootOrder(CurrentOrder);
2800         ++NumOpsWantToKeepOrder[CurrentOrder];
2801         // This is a special case, as it does not gather, but at the same time
2802         // we are not extending buildTree_rec() towards the operands.
2803         ValueList Op0;
2804         Op0.assign(VL.size(), VL0->getOperand(0));
2805         VectorizableTree.back()->setOperand(0, Op0);
2806         return;
2807       }
2808       LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
2809       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2810                    ReuseShuffleIndicies);
2811       BS.cancelScheduling(VL, VL0);
2812       return;
2813     }
2814     case Instruction::Load: {
2815       // Check that a vectorized load would load the same memory as a scalar
2816       // load. For example, we don't want to vectorize loads that are smaller
2817       // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
2818       // treats loading/storing it as an i8 struct. If we vectorize loads/stores
2819       // from such a struct, we read/write packed bits disagreeing with the
2820       // unvectorized version.
2821       Type *ScalarTy = VL0->getType();
2822 
2823       if (DL->getTypeSizeInBits(ScalarTy) !=
2824           DL->getTypeAllocSizeInBits(ScalarTy)) {
2825         BS.cancelScheduling(VL, VL0);
2826         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2827                      ReuseShuffleIndicies);
2828         LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
2829         return;
2830       }
2831 
2832       // Make sure all loads in the bundle are simple - we can't vectorize
2833       // atomic or volatile loads.
2834       SmallVector<Value *, 4> PointerOps(VL.size());
2835       auto POIter = PointerOps.begin();
2836       for (Value *V : VL) {
2837         auto *L = cast<LoadInst>(V);
2838         if (!L->isSimple()) {
2839           BS.cancelScheduling(VL, VL0);
2840           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2841                        ReuseShuffleIndicies);
2842           LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
2843           return;
2844         }
2845         *POIter = L->getPointerOperand();
2846         ++POIter;
2847       }
2848 
2849       OrdersType CurrentOrder;
2850       // Check the order of pointer operands.
2851       if (llvm::sortPtrAccesses(PointerOps, *DL, *SE, CurrentOrder)) {
2852         Value *Ptr0;
2853         Value *PtrN;
2854         if (CurrentOrder.empty()) {
2855           Ptr0 = PointerOps.front();
2856           PtrN = PointerOps.back();
2857         } else {
2858           Ptr0 = PointerOps[CurrentOrder.front()];
2859           PtrN = PointerOps[CurrentOrder.back()];
2860         }
2861         const SCEV *Scev0 = SE->getSCEV(Ptr0);
2862         const SCEV *ScevN = SE->getSCEV(PtrN);
2863         const auto *Diff =
2864             dyn_cast<SCEVConstant>(SE->getMinusSCEV(ScevN, Scev0));
2865         uint64_t Size = DL->getTypeAllocSize(ScalarTy);
2866         // Check that the sorted loads are consecutive.
2867         if (Diff && Diff->getAPInt() == (VL.size() - 1) * Size) {
2868           if (CurrentOrder.empty()) {
2869             // Original loads are consecutive and does not require reordering.
2870             ++NumOpsWantToKeepOriginalOrder;
2871             TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
2872                                          UserTreeIdx, ReuseShuffleIndicies);
2873             TE->setOperandsInOrder();
2874             LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
2875           } else {
2876             // Need to reorder.
2877             TreeEntry *TE =
2878                 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2879                              ReuseShuffleIndicies, CurrentOrder);
2880             TE->setOperandsInOrder();
2881             LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
2882             findRootOrder(CurrentOrder);
2883             ++NumOpsWantToKeepOrder[CurrentOrder];
2884           }
2885           return;
2886         }
2887         // Vectorizing non-consecutive loads with `llvm.masked.gather`.
2888         TreeEntry *TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S,
2889                                      UserTreeIdx, ReuseShuffleIndicies);
2890         TE->setOperandsInOrder();
2891         buildTree_rec(PointerOps, Depth + 1, {TE, 0});
2892         LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n");
2893         return;
2894       }
2895 
2896       LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
2897       BS.cancelScheduling(VL, VL0);
2898       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2899                    ReuseShuffleIndicies);
2900       return;
2901     }
2902     case Instruction::ZExt:
2903     case Instruction::SExt:
2904     case Instruction::FPToUI:
2905     case Instruction::FPToSI:
2906     case Instruction::FPExt:
2907     case Instruction::PtrToInt:
2908     case Instruction::IntToPtr:
2909     case Instruction::SIToFP:
2910     case Instruction::UIToFP:
2911     case Instruction::Trunc:
2912     case Instruction::FPTrunc:
2913     case Instruction::BitCast: {
2914       Type *SrcTy = VL0->getOperand(0)->getType();
2915       for (Value *V : VL) {
2916         Type *Ty = cast<Instruction>(V)->getOperand(0)->getType();
2917         if (Ty != SrcTy || !isValidElementType(Ty)) {
2918           BS.cancelScheduling(VL, VL0);
2919           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2920                        ReuseShuffleIndicies);
2921           LLVM_DEBUG(dbgs()
2922                      << "SLP: Gathering casts with different src types.\n");
2923           return;
2924         }
2925       }
2926       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2927                                    ReuseShuffleIndicies);
2928       LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");
2929 
2930       TE->setOperandsInOrder();
2931       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
2932         ValueList Operands;
2933         // Prepare the operand vector.
2934         for (Value *V : VL)
2935           Operands.push_back(cast<Instruction>(V)->getOperand(i));
2936 
2937         buildTree_rec(Operands, Depth + 1, {TE, i});
2938       }
2939       return;
2940     }
2941     case Instruction::ICmp:
2942     case Instruction::FCmp: {
2943       // Check that all of the compares have the same predicate.
2944       CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
2945       CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0);
2946       Type *ComparedTy = VL0->getOperand(0)->getType();
2947       for (Value *V : VL) {
2948         CmpInst *Cmp = cast<CmpInst>(V);
2949         if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) ||
2950             Cmp->getOperand(0)->getType() != ComparedTy) {
2951           BS.cancelScheduling(VL, VL0);
2952           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2953                        ReuseShuffleIndicies);
2954           LLVM_DEBUG(dbgs()
2955                      << "SLP: Gathering cmp with different predicate.\n");
2956           return;
2957         }
2958       }
2959 
2960       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2961                                    ReuseShuffleIndicies);
2962       LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n");
2963 
2964       ValueList Left, Right;
2965       if (cast<CmpInst>(VL0)->isCommutative()) {
2966         // Commutative predicate - collect + sort operands of the instructions
2967         // so that each side is more likely to have the same opcode.
2968         assert(P0 == SwapP0 && "Commutative Predicate mismatch");
2969         reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
2970       } else {
2971         // Collect operands - commute if it uses the swapped predicate.
2972         for (Value *V : VL) {
2973           auto *Cmp = cast<CmpInst>(V);
2974           Value *LHS = Cmp->getOperand(0);
2975           Value *RHS = Cmp->getOperand(1);
2976           if (Cmp->getPredicate() != P0)
2977             std::swap(LHS, RHS);
2978           Left.push_back(LHS);
2979           Right.push_back(RHS);
2980         }
2981       }
2982       TE->setOperand(0, Left);
2983       TE->setOperand(1, Right);
2984       buildTree_rec(Left, Depth + 1, {TE, 0});
2985       buildTree_rec(Right, Depth + 1, {TE, 1});
2986       return;
2987     }
2988     case Instruction::Select:
2989     case Instruction::FNeg:
2990     case Instruction::Add:
2991     case Instruction::FAdd:
2992     case Instruction::Sub:
2993     case Instruction::FSub:
2994     case Instruction::Mul:
2995     case Instruction::FMul:
2996     case Instruction::UDiv:
2997     case Instruction::SDiv:
2998     case Instruction::FDiv:
2999     case Instruction::URem:
3000     case Instruction::SRem:
3001     case Instruction::FRem:
3002     case Instruction::Shl:
3003     case Instruction::LShr:
3004     case Instruction::AShr:
3005     case Instruction::And:
3006     case Instruction::Or:
3007     case Instruction::Xor: {
3008       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3009                                    ReuseShuffleIndicies);
3010       LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n");
3011 
3012       // Sort operands of the instructions so that each side is more likely to
3013       // have the same opcode.
3014       if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
3015         ValueList Left, Right;
3016         reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
3017         TE->setOperand(0, Left);
3018         TE->setOperand(1, Right);
3019         buildTree_rec(Left, Depth + 1, {TE, 0});
3020         buildTree_rec(Right, Depth + 1, {TE, 1});
3021         return;
3022       }
3023 
3024       TE->setOperandsInOrder();
3025       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
3026         ValueList Operands;
3027         // Prepare the operand vector.
3028         for (Value *V : VL)
3029           Operands.push_back(cast<Instruction>(V)->getOperand(i));
3030 
3031         buildTree_rec(Operands, Depth + 1, {TE, i});
3032       }
3033       return;
3034     }
3035     case Instruction::GetElementPtr: {
3036       // We don't combine GEPs with complicated (nested) indexing.
3037       for (Value *V : VL) {
3038         if (cast<Instruction>(V)->getNumOperands() != 2) {
3039           LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
3040           BS.cancelScheduling(VL, VL0);
3041           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3042                        ReuseShuffleIndicies);
3043           return;
3044         }
3045       }
3046 
3047       // We can't combine several GEPs into one vector if they operate on
3048       // different types.
3049       Type *Ty0 = VL0->getOperand(0)->getType();
3050       for (Value *V : VL) {
3051         Type *CurTy = cast<Instruction>(V)->getOperand(0)->getType();
3052         if (Ty0 != CurTy) {
3053           LLVM_DEBUG(dbgs()
3054                      << "SLP: not-vectorizable GEP (different types).\n");
3055           BS.cancelScheduling(VL, VL0);
3056           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3057                        ReuseShuffleIndicies);
3058           return;
3059         }
3060       }
3061 
3062       // We don't combine GEPs with non-constant indexes.
3063       Type *Ty1 = VL0->getOperand(1)->getType();
3064       for (Value *V : VL) {
3065         auto Op = cast<Instruction>(V)->getOperand(1);
3066         if (!isa<ConstantInt>(Op) ||
3067             (Op->getType() != Ty1 &&
3068              Op->getType()->getScalarSizeInBits() >
3069                  DL->getIndexSizeInBits(
3070                      V->getType()->getPointerAddressSpace()))) {
3071           LLVM_DEBUG(dbgs()
3072                      << "SLP: not-vectorizable GEP (non-constant indexes).\n");
3073           BS.cancelScheduling(VL, VL0);
3074           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3075                        ReuseShuffleIndicies);
3076           return;
3077         }
3078       }
3079 
3080       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3081                                    ReuseShuffleIndicies);
3082       LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
3083       TE->setOperandsInOrder();
3084       for (unsigned i = 0, e = 2; i < e; ++i) {
3085         ValueList Operands;
3086         // Prepare the operand vector.
3087         for (Value *V : VL)
3088           Operands.push_back(cast<Instruction>(V)->getOperand(i));
3089 
3090         buildTree_rec(Operands, Depth + 1, {TE, i});
3091       }
3092       return;
3093     }
3094     case Instruction::Store: {
3095       // Check if the stores are consecutive or if we need to swizzle them.
3096       llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType();
3097       // Make sure all stores in the bundle are simple - we can't vectorize
3098       // atomic or volatile stores.
3099       SmallVector<Value *, 4> PointerOps(VL.size());
3100       ValueList Operands(VL.size());
3101       auto POIter = PointerOps.begin();
3102       auto OIter = Operands.begin();
3103       for (Value *V : VL) {
3104         auto *SI = cast<StoreInst>(V);
3105         if (!SI->isSimple()) {
3106           BS.cancelScheduling(VL, VL0);
3107           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3108                        ReuseShuffleIndicies);
3109           LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n");
3110           return;
3111         }
3112         *POIter = SI->getPointerOperand();
3113         *OIter = SI->getValueOperand();
3114         ++POIter;
3115         ++OIter;
3116       }
3117 
3118       OrdersType CurrentOrder;
3119       // Check the order of pointer operands.
3120       if (llvm::sortPtrAccesses(PointerOps, *DL, *SE, CurrentOrder)) {
3121         Value *Ptr0;
3122         Value *PtrN;
3123         if (CurrentOrder.empty()) {
3124           Ptr0 = PointerOps.front();
3125           PtrN = PointerOps.back();
3126         } else {
3127           Ptr0 = PointerOps[CurrentOrder.front()];
3128           PtrN = PointerOps[CurrentOrder.back()];
3129         }
3130         const SCEV *Scev0 = SE->getSCEV(Ptr0);
3131         const SCEV *ScevN = SE->getSCEV(PtrN);
3132         const auto *Diff =
3133             dyn_cast<SCEVConstant>(SE->getMinusSCEV(ScevN, Scev0));
3134         uint64_t Size = DL->getTypeAllocSize(ScalarTy);
3135         // Check that the sorted pointer operands are consecutive.
3136         if (Diff && Diff->getAPInt() == (VL.size() - 1) * Size) {
3137           if (CurrentOrder.empty()) {
3138             // Original stores are consecutive and does not require reordering.
3139             ++NumOpsWantToKeepOriginalOrder;
3140             TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
3141                                          UserTreeIdx, ReuseShuffleIndicies);
3142             TE->setOperandsInOrder();
3143             buildTree_rec(Operands, Depth + 1, {TE, 0});
3144             LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
3145           } else {
3146             TreeEntry *TE =
3147                 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3148                              ReuseShuffleIndicies, CurrentOrder);
3149             TE->setOperandsInOrder();
3150             buildTree_rec(Operands, Depth + 1, {TE, 0});
3151             LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n");
3152             findRootOrder(CurrentOrder);
3153             ++NumOpsWantToKeepOrder[CurrentOrder];
3154           }
3155           return;
3156         }
3157       }
3158 
3159       BS.cancelScheduling(VL, VL0);
3160       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3161                    ReuseShuffleIndicies);
3162       LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
3163       return;
3164     }
3165     case Instruction::Call: {
3166       // Check if the calls are all to the same vectorizable intrinsic or
3167       // library function.
3168       CallInst *CI = cast<CallInst>(VL0);
3169       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3170 
3171       VFShape Shape = VFShape::get(
3172           *CI, ElementCount::getFixed(static_cast<unsigned int>(VL.size())),
3173           false /*HasGlobalPred*/);
3174       Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3175 
3176       if (!VecFunc && !isTriviallyVectorizable(ID)) {
3177         BS.cancelScheduling(VL, VL0);
3178         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3179                      ReuseShuffleIndicies);
3180         LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
3181         return;
3182       }
3183       Function *F = CI->getCalledFunction();
3184       unsigned NumArgs = CI->getNumArgOperands();
3185       SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr);
3186       for (unsigned j = 0; j != NumArgs; ++j)
3187         if (hasVectorInstrinsicScalarOpd(ID, j))
3188           ScalarArgs[j] = CI->getArgOperand(j);
3189       for (Value *V : VL) {
3190         CallInst *CI2 = dyn_cast<CallInst>(V);
3191         if (!CI2 || CI2->getCalledFunction() != F ||
3192             getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
3193             (VecFunc &&
3194              VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) ||
3195             !CI->hasIdenticalOperandBundleSchema(*CI2)) {
3196           BS.cancelScheduling(VL, VL0);
3197           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3198                        ReuseShuffleIndicies);
3199           LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V
3200                             << "\n");
3201           return;
3202         }
3203         // Some intrinsics have scalar arguments and should be same in order for
3204         // them to be vectorized.
3205         for (unsigned j = 0; j != NumArgs; ++j) {
3206           if (hasVectorInstrinsicScalarOpd(ID, j)) {
3207             Value *A1J = CI2->getArgOperand(j);
3208             if (ScalarArgs[j] != A1J) {
3209               BS.cancelScheduling(VL, VL0);
3210               newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3211                            ReuseShuffleIndicies);
3212               LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
3213                                 << " argument " << ScalarArgs[j] << "!=" << A1J
3214                                 << "\n");
3215               return;
3216             }
3217           }
3218         }
3219         // Verify that the bundle operands are identical between the two calls.
3220         if (CI->hasOperandBundles() &&
3221             !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
3222                         CI->op_begin() + CI->getBundleOperandsEndIndex(),
3223                         CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
3224           BS.cancelScheduling(VL, VL0);
3225           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3226                        ReuseShuffleIndicies);
3227           LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
3228                             << *CI << "!=" << *V << '\n');
3229           return;
3230         }
3231       }
3232 
3233       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3234                                    ReuseShuffleIndicies);
3235       TE->setOperandsInOrder();
3236       for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
3237         ValueList Operands;
3238         // Prepare the operand vector.
3239         for (Value *V : VL) {
3240           auto *CI2 = cast<CallInst>(V);
3241           Operands.push_back(CI2->getArgOperand(i));
3242         }
3243         buildTree_rec(Operands, Depth + 1, {TE, i});
3244       }
3245       return;
3246     }
3247     case Instruction::ShuffleVector: {
3248       // If this is not an alternate sequence of opcode like add-sub
3249       // then do not vectorize this instruction.
3250       if (!S.isAltShuffle()) {
3251         BS.cancelScheduling(VL, VL0);
3252         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3253                      ReuseShuffleIndicies);
3254         LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
3255         return;
3256       }
3257       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3258                                    ReuseShuffleIndicies);
3259       LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
3260 
3261       // Reorder operands if reordering would enable vectorization.
3262       if (isa<BinaryOperator>(VL0)) {
3263         ValueList Left, Right;
3264         reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
3265         TE->setOperand(0, Left);
3266         TE->setOperand(1, Right);
3267         buildTree_rec(Left, Depth + 1, {TE, 0});
3268         buildTree_rec(Right, Depth + 1, {TE, 1});
3269         return;
3270       }
3271 
3272       TE->setOperandsInOrder();
3273       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
3274         ValueList Operands;
3275         // Prepare the operand vector.
3276         for (Value *V : VL)
3277           Operands.push_back(cast<Instruction>(V)->getOperand(i));
3278 
3279         buildTree_rec(Operands, Depth + 1, {TE, i});
3280       }
3281       return;
3282     }
3283     default:
3284       BS.cancelScheduling(VL, VL0);
3285       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3286                    ReuseShuffleIndicies);
3287       LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
3288       return;
3289   }
3290 }
3291 
3292 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
3293   unsigned N = 1;
3294   Type *EltTy = T;
3295 
3296   while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) ||
3297          isa<VectorType>(EltTy)) {
3298     if (auto *ST = dyn_cast<StructType>(EltTy)) {
3299       // Check that struct is homogeneous.
3300       for (const auto *Ty : ST->elements())
3301         if (Ty != *ST->element_begin())
3302           return 0;
3303       N *= ST->getNumElements();
3304       EltTy = *ST->element_begin();
3305     } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) {
3306       N *= AT->getNumElements();
3307       EltTy = AT->getElementType();
3308     } else {
3309       auto *VT = cast<FixedVectorType>(EltTy);
3310       N *= VT->getNumElements();
3311       EltTy = VT->getElementType();
3312     }
3313   }
3314 
3315   if (!isValidElementType(EltTy))
3316     return 0;
3317   uint64_t VTSize = DL.getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N));
3318   if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T))
3319     return 0;
3320   return N;
3321 }
3322 
3323 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
3324                               SmallVectorImpl<unsigned> &CurrentOrder) const {
3325   Instruction *E0 = cast<Instruction>(OpValue);
3326   assert(E0->getOpcode() == Instruction::ExtractElement ||
3327          E0->getOpcode() == Instruction::ExtractValue);
3328   assert(E0->getOpcode() == getSameOpcode(VL).getOpcode() && "Invalid opcode");
3329   // Check if all of the extracts come from the same vector and from the
3330   // correct offset.
3331   Value *Vec = E0->getOperand(0);
3332 
3333   CurrentOrder.clear();
3334 
3335   // We have to extract from a vector/aggregate with the same number of elements.
3336   unsigned NElts;
3337   if (E0->getOpcode() == Instruction::ExtractValue) {
3338     const DataLayout &DL = E0->getModule()->getDataLayout();
3339     NElts = canMapToVector(Vec->getType(), DL);
3340     if (!NElts)
3341       return false;
3342     // Check if load can be rewritten as load of vector.
3343     LoadInst *LI = dyn_cast<LoadInst>(Vec);
3344     if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
3345       return false;
3346   } else {
3347     NElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
3348   }
3349 
3350   if (NElts != VL.size())
3351     return false;
3352 
3353   // Check that all of the indices extract from the correct offset.
3354   bool ShouldKeepOrder = true;
3355   unsigned E = VL.size();
3356   // Assign to all items the initial value E + 1 so we can check if the extract
3357   // instruction index was used already.
3358   // Also, later we can check that all the indices are used and we have a
3359   // consecutive access in the extract instructions, by checking that no
3360   // element of CurrentOrder still has value E + 1.
3361   CurrentOrder.assign(E, E + 1);
3362   unsigned I = 0;
3363   for (; I < E; ++I) {
3364     auto *Inst = cast<Instruction>(VL[I]);
3365     if (Inst->getOperand(0) != Vec)
3366       break;
3367     Optional<unsigned> Idx = getExtractIndex(Inst);
3368     if (!Idx)
3369       break;
3370     const unsigned ExtIdx = *Idx;
3371     if (ExtIdx != I) {
3372       if (ExtIdx >= E || CurrentOrder[ExtIdx] != E + 1)
3373         break;
3374       ShouldKeepOrder = false;
3375       CurrentOrder[ExtIdx] = I;
3376     } else {
3377       if (CurrentOrder[I] != E + 1)
3378         break;
3379       CurrentOrder[I] = I;
3380     }
3381   }
3382   if (I < E) {
3383     CurrentOrder.clear();
3384     return false;
3385   }
3386 
3387   return ShouldKeepOrder;
3388 }
3389 
3390 bool BoUpSLP::areAllUsersVectorized(Instruction *I) const {
3391   return I->hasOneUse() ||
3392          std::all_of(I->user_begin(), I->user_end(), [this](User *U) {
3393            return ScalarToTreeEntry.count(U) > 0;
3394          });
3395 }
3396 
3397 static std::pair<unsigned, unsigned>
3398 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy,
3399                    TargetTransformInfo *TTI, TargetLibraryInfo *TLI) {
3400   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3401 
3402   // Calculate the cost of the scalar and vector calls.
3403   IntrinsicCostAttributes CostAttrs(ID, *CI, VecTy->getElementCount());
3404   int IntrinsicCost =
3405     TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput);
3406 
3407   auto Shape = VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
3408                                      VecTy->getNumElements())),
3409                             false /*HasGlobalPred*/);
3410   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3411   int LibCost = IntrinsicCost;
3412   if (!CI->isNoBuiltin() && VecFunc) {
3413     // Calculate the cost of the vector library call.
3414     SmallVector<Type *, 4> VecTys;
3415     for (Use &Arg : CI->args())
3416       VecTys.push_back(
3417           FixedVectorType::get(Arg->getType(), VecTy->getNumElements()));
3418 
3419     // If the corresponding vector call is cheaper, return its cost.
3420     LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys,
3421                                     TTI::TCK_RecipThroughput);
3422   }
3423   return {IntrinsicCost, LibCost};
3424 }
3425 
3426 int BoUpSLP::getEntryCost(TreeEntry *E) {
3427   ArrayRef<Value*> VL = E->Scalars;
3428 
3429   Type *ScalarTy = VL[0]->getType();
3430   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
3431     ScalarTy = SI->getValueOperand()->getType();
3432   else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0]))
3433     ScalarTy = CI->getOperand(0)->getType();
3434   auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
3435   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
3436 
3437   // If we have computed a smaller type for the expression, update VecTy so
3438   // that the costs will be accurate.
3439   if (MinBWs.count(VL[0]))
3440     VecTy = FixedVectorType::get(
3441         IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size());
3442 
3443   unsigned ReuseShuffleNumbers = E->ReuseShuffleIndices.size();
3444   bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
3445   int ReuseShuffleCost = 0;
3446   if (NeedToShuffleReuses) {
3447     ReuseShuffleCost =
3448         TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
3449   }
3450   if (E->State == TreeEntry::NeedToGather) {
3451     if (allConstant(VL))
3452       return 0;
3453     if (isSplat(VL)) {
3454       return ReuseShuffleCost +
3455              TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
3456     }
3457     if (E->getOpcode() == Instruction::ExtractElement &&
3458         allSameType(VL) && allSameBlock(VL)) {
3459       Optional<TargetTransformInfo::ShuffleKind> ShuffleKind = isShuffle(VL);
3460       if (ShuffleKind.hasValue()) {
3461         int Cost = TTI->getShuffleCost(ShuffleKind.getValue(), VecTy);
3462         for (auto *V : VL) {
3463           // If all users of instruction are going to be vectorized and this
3464           // instruction itself is not going to be vectorized, consider this
3465           // instruction as dead and remove its cost from the final cost of the
3466           // vectorized tree.
3467           if (areAllUsersVectorized(cast<Instruction>(V)) &&
3468               !ScalarToTreeEntry.count(V)) {
3469             auto *IO = cast<ConstantInt>(
3470                 cast<ExtractElementInst>(V)->getIndexOperand());
3471             Cost -= TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
3472                                             IO->getZExtValue());
3473           }
3474         }
3475         return ReuseShuffleCost + Cost;
3476       }
3477     }
3478     return ReuseShuffleCost + getGatherCost(VL);
3479   }
3480   assert((E->State == TreeEntry::Vectorize ||
3481           E->State == TreeEntry::ScatterVectorize) &&
3482          "Unhandled state");
3483   assert(E->getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
3484   Instruction *VL0 = E->getMainOp();
3485   unsigned ShuffleOrOp =
3486       E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
3487   switch (ShuffleOrOp) {
3488     case Instruction::PHI:
3489       return 0;
3490 
3491     case Instruction::ExtractValue:
3492     case Instruction::ExtractElement: {
3493       int DeadCost = 0;
3494       if (NeedToShuffleReuses) {
3495         unsigned Idx = 0;
3496         for (unsigned I : E->ReuseShuffleIndices) {
3497           if (ShuffleOrOp == Instruction::ExtractElement) {
3498             auto *IO = cast<ConstantInt>(
3499                 cast<ExtractElementInst>(VL[I])->getIndexOperand());
3500             Idx = IO->getZExtValue();
3501             ReuseShuffleCost -= TTI->getVectorInstrCost(
3502                 Instruction::ExtractElement, VecTy, Idx);
3503           } else {
3504             ReuseShuffleCost -= TTI->getVectorInstrCost(
3505                 Instruction::ExtractElement, VecTy, Idx);
3506             ++Idx;
3507           }
3508         }
3509         Idx = ReuseShuffleNumbers;
3510         for (Value *V : VL) {
3511           if (ShuffleOrOp == Instruction::ExtractElement) {
3512             auto *IO = cast<ConstantInt>(
3513                 cast<ExtractElementInst>(V)->getIndexOperand());
3514             Idx = IO->getZExtValue();
3515           } else {
3516             --Idx;
3517           }
3518           ReuseShuffleCost +=
3519               TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, Idx);
3520         }
3521         DeadCost = ReuseShuffleCost;
3522       } else if (!E->ReorderIndices.empty()) {
3523         DeadCost = TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
3524                                        VecTy);
3525       }
3526       for (unsigned I = 0, E = VL.size(); I < E; ++I) {
3527         Instruction *EI = cast<Instruction>(VL[I]);
3528         // If all users are going to be vectorized, instruction can be
3529         // considered as dead.
3530         // The same, if have only one user, it will be vectorized for sure.
3531         if (areAllUsersVectorized(EI)) {
3532           // Take credit for instruction that will become dead.
3533           if (EI->hasOneUse()) {
3534             Instruction *Ext = EI->user_back();
3535             if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
3536                 all_of(Ext->users(),
3537                        [](User *U) { return isa<GetElementPtrInst>(U); })) {
3538               // Use getExtractWithExtendCost() to calculate the cost of
3539               // extractelement/ext pair.
3540               DeadCost -= TTI->getExtractWithExtendCost(
3541                   Ext->getOpcode(), Ext->getType(), VecTy, I);
3542               // Add back the cost of s|zext which is subtracted separately.
3543               DeadCost += TTI->getCastInstrCost(
3544                   Ext->getOpcode(), Ext->getType(), EI->getType(),
3545                   TTI::getCastContextHint(Ext), CostKind, Ext);
3546               continue;
3547             }
3548           }
3549           DeadCost -=
3550               TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, I);
3551         }
3552       }
3553       return DeadCost;
3554     }
3555     case Instruction::ZExt:
3556     case Instruction::SExt:
3557     case Instruction::FPToUI:
3558     case Instruction::FPToSI:
3559     case Instruction::FPExt:
3560     case Instruction::PtrToInt:
3561     case Instruction::IntToPtr:
3562     case Instruction::SIToFP:
3563     case Instruction::UIToFP:
3564     case Instruction::Trunc:
3565     case Instruction::FPTrunc:
3566     case Instruction::BitCast: {
3567       Type *SrcTy = VL0->getOperand(0)->getType();
3568       int ScalarEltCost =
3569           TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy,
3570                                 TTI::getCastContextHint(VL0), CostKind, VL0);
3571       if (NeedToShuffleReuses) {
3572         ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3573       }
3574 
3575       // Calculate the cost of this instruction.
3576       int ScalarCost = VL.size() * ScalarEltCost;
3577 
3578       auto *SrcVecTy = FixedVectorType::get(SrcTy, VL.size());
3579       int VecCost = 0;
3580       // Check if the values are candidates to demote.
3581       if (!MinBWs.count(VL0) || VecTy != SrcVecTy) {
3582         VecCost =
3583             ReuseShuffleCost +
3584             TTI->getCastInstrCost(E->getOpcode(), VecTy, SrcVecTy,
3585                                   TTI::getCastContextHint(VL0), CostKind, VL0);
3586       }
3587       LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecCost, ScalarCost));
3588       return VecCost - ScalarCost;
3589     }
3590     case Instruction::FCmp:
3591     case Instruction::ICmp:
3592     case Instruction::Select: {
3593       // Calculate the cost of this instruction.
3594       int ScalarEltCost =
3595           TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, Builder.getInt1Ty(),
3596                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, VL0);
3597       if (NeedToShuffleReuses) {
3598         ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3599       }
3600       auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size());
3601       int ScalarCost = VecTy->getNumElements() * ScalarEltCost;
3602 
3603       // Check if all entries in VL are either compares or selects with compares
3604       // as condition that have the same predicates.
3605       CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE;
3606       bool First = true;
3607       for (auto *V : VL) {
3608         CmpInst::Predicate CurrentPred;
3609         auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value());
3610         if ((!match(V, m_Select(MatchCmp, m_Value(), m_Value())) &&
3611              !match(V, MatchCmp)) ||
3612             (!First && VecPred != CurrentPred)) {
3613           VecPred = CmpInst::BAD_ICMP_PREDICATE;
3614           break;
3615         }
3616         First = false;
3617         VecPred = CurrentPred;
3618       }
3619 
3620       int VecCost = TTI->getCmpSelInstrCost(E->getOpcode(), VecTy, MaskTy,
3621                                             VecPred, CostKind, VL0);
3622       // Check if it is possible and profitable to use min/max for selects in
3623       // VL.
3624       //
3625       auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL);
3626       if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) {
3627         IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy,
3628                                           {VecTy, VecTy});
3629         int IntrinsicCost = TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
3630         // If the selects are the only uses of the compares, they will be dead
3631         // and we can adjust the cost by removing their cost.
3632         if (IntrinsicAndUse.second)
3633           IntrinsicCost -=
3634               TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy, MaskTy,
3635                                       CmpInst::BAD_ICMP_PREDICATE, CostKind);
3636         VecCost = std::min(VecCost, IntrinsicCost);
3637       }
3638       LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecCost, ScalarCost));
3639       return ReuseShuffleCost + VecCost - ScalarCost;
3640     }
3641     case Instruction::FNeg:
3642     case Instruction::Add:
3643     case Instruction::FAdd:
3644     case Instruction::Sub:
3645     case Instruction::FSub:
3646     case Instruction::Mul:
3647     case Instruction::FMul:
3648     case Instruction::UDiv:
3649     case Instruction::SDiv:
3650     case Instruction::FDiv:
3651     case Instruction::URem:
3652     case Instruction::SRem:
3653     case Instruction::FRem:
3654     case Instruction::Shl:
3655     case Instruction::LShr:
3656     case Instruction::AShr:
3657     case Instruction::And:
3658     case Instruction::Or:
3659     case Instruction::Xor: {
3660       // Certain instructions can be cheaper to vectorize if they have a
3661       // constant second vector operand.
3662       TargetTransformInfo::OperandValueKind Op1VK =
3663           TargetTransformInfo::OK_AnyValue;
3664       TargetTransformInfo::OperandValueKind Op2VK =
3665           TargetTransformInfo::OK_UniformConstantValue;
3666       TargetTransformInfo::OperandValueProperties Op1VP =
3667           TargetTransformInfo::OP_None;
3668       TargetTransformInfo::OperandValueProperties Op2VP =
3669           TargetTransformInfo::OP_PowerOf2;
3670 
3671       // If all operands are exactly the same ConstantInt then set the
3672       // operand kind to OK_UniformConstantValue.
3673       // If instead not all operands are constants, then set the operand kind
3674       // to OK_AnyValue. If all operands are constants but not the same,
3675       // then set the operand kind to OK_NonUniformConstantValue.
3676       ConstantInt *CInt0 = nullptr;
3677       for (unsigned i = 0, e = VL.size(); i < e; ++i) {
3678         const Instruction *I = cast<Instruction>(VL[i]);
3679         unsigned OpIdx = isa<BinaryOperator>(I) ? 1 : 0;
3680         ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx));
3681         if (!CInt) {
3682           Op2VK = TargetTransformInfo::OK_AnyValue;
3683           Op2VP = TargetTransformInfo::OP_None;
3684           break;
3685         }
3686         if (Op2VP == TargetTransformInfo::OP_PowerOf2 &&
3687             !CInt->getValue().isPowerOf2())
3688           Op2VP = TargetTransformInfo::OP_None;
3689         if (i == 0) {
3690           CInt0 = CInt;
3691           continue;
3692         }
3693         if (CInt0 != CInt)
3694           Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
3695       }
3696 
3697       SmallVector<const Value *, 4> Operands(VL0->operand_values());
3698       int ScalarEltCost = TTI->getArithmeticInstrCost(
3699           E->getOpcode(), ScalarTy, CostKind, Op1VK, Op2VK, Op1VP, Op2VP,
3700           Operands, VL0);
3701       if (NeedToShuffleReuses) {
3702         ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3703       }
3704       int ScalarCost = VecTy->getNumElements() * ScalarEltCost;
3705       int VecCost = TTI->getArithmeticInstrCost(
3706           E->getOpcode(), VecTy, CostKind, Op1VK, Op2VK, Op1VP, Op2VP,
3707           Operands, VL0);
3708       LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecCost, ScalarCost));
3709       return ReuseShuffleCost + VecCost - ScalarCost;
3710     }
3711     case Instruction::GetElementPtr: {
3712       TargetTransformInfo::OperandValueKind Op1VK =
3713           TargetTransformInfo::OK_AnyValue;
3714       TargetTransformInfo::OperandValueKind Op2VK =
3715           TargetTransformInfo::OK_UniformConstantValue;
3716 
3717       int ScalarEltCost =
3718           TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, CostKind,
3719                                       Op1VK, Op2VK);
3720       if (NeedToShuffleReuses) {
3721         ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3722       }
3723       int ScalarCost = VecTy->getNumElements() * ScalarEltCost;
3724       int VecCost =
3725           TTI->getArithmeticInstrCost(Instruction::Add, VecTy, CostKind,
3726                                       Op1VK, Op2VK);
3727       LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecCost, ScalarCost));
3728       return ReuseShuffleCost + VecCost - ScalarCost;
3729     }
3730     case Instruction::Load: {
3731       // Cost of wide load - cost of scalar loads.
3732       Align alignment = cast<LoadInst>(VL0)->getAlign();
3733       int ScalarEltCost =
3734           TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0,
3735                                CostKind, VL0);
3736       if (NeedToShuffleReuses) {
3737         ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3738       }
3739       int ScalarLdCost = VecTy->getNumElements() * ScalarEltCost;
3740       int VecLdCost;
3741       if (E->State == TreeEntry::Vectorize) {
3742         VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, alignment, 0,
3743                                          CostKind, VL0);
3744       } else {
3745         assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState");
3746         VecLdCost = TTI->getGatherScatterOpCost(
3747             Instruction::Load, VecTy, cast<LoadInst>(VL0)->getPointerOperand(),
3748             /*VariableMask=*/false, alignment, CostKind, VL0);
3749       }
3750       if (!NeedToShuffleReuses && !E->ReorderIndices.empty())
3751         VecLdCost += TTI->getShuffleCost(
3752             TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
3753       LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecLdCost, ScalarLdCost));
3754       return ReuseShuffleCost + VecLdCost - ScalarLdCost;
3755     }
3756     case Instruction::Store: {
3757       // We know that we can merge the stores. Calculate the cost.
3758       bool IsReorder = !E->ReorderIndices.empty();
3759       auto *SI =
3760           cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0);
3761       Align Alignment = SI->getAlign();
3762       int ScalarEltCost =
3763           TTI->getMemoryOpCost(Instruction::Store, ScalarTy, Alignment, 0,
3764                                CostKind, VL0);
3765       int ScalarStCost = VecTy->getNumElements() * ScalarEltCost;
3766       int VecStCost = TTI->getMemoryOpCost(Instruction::Store,
3767                                            VecTy, Alignment, 0, CostKind, VL0);
3768       if (IsReorder)
3769         VecStCost += TTI->getShuffleCost(
3770             TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
3771       LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecStCost, ScalarStCost));
3772       return VecStCost - ScalarStCost;
3773     }
3774     case Instruction::Call: {
3775       CallInst *CI = cast<CallInst>(VL0);
3776       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3777 
3778       // Calculate the cost of the scalar and vector calls.
3779       IntrinsicCostAttributes CostAttrs(ID, *CI, ElementCount::getFixed(1), 1);
3780       int ScalarEltCost = TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
3781       if (NeedToShuffleReuses) {
3782         ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3783       }
3784       int ScalarCallCost = VecTy->getNumElements() * ScalarEltCost;
3785 
3786       auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
3787       int VecCallCost = std::min(VecCallCosts.first, VecCallCosts.second);
3788 
3789       LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
3790                         << " (" << VecCallCost << "-" << ScalarCallCost << ")"
3791                         << " for " << *CI << "\n");
3792 
3793       return ReuseShuffleCost + VecCallCost - ScalarCallCost;
3794     }
3795     case Instruction::ShuffleVector: {
3796       assert(E->isAltShuffle() &&
3797              ((Instruction::isBinaryOp(E->getOpcode()) &&
3798                Instruction::isBinaryOp(E->getAltOpcode())) ||
3799               (Instruction::isCast(E->getOpcode()) &&
3800                Instruction::isCast(E->getAltOpcode()))) &&
3801              "Invalid Shuffle Vector Operand");
3802       int ScalarCost = 0;
3803       if (NeedToShuffleReuses) {
3804         for (unsigned Idx : E->ReuseShuffleIndices) {
3805           Instruction *I = cast<Instruction>(VL[Idx]);
3806           InstructionCost Cost = TTI->getInstructionCost(I, CostKind);
3807           assert(Cost.isValid() && "Invalid instruction cost");
3808           ReuseShuffleCost -= *(Cost.getValue());
3809         }
3810         for (Value *V : VL) {
3811           Instruction *I = cast<Instruction>(V);
3812           InstructionCost Cost = TTI->getInstructionCost(I, CostKind);
3813           assert(Cost.isValid() && "Invalid instruction cost");
3814           ReuseShuffleCost += *(Cost.getValue());
3815         }
3816       }
3817       for (Value *V : VL) {
3818         Instruction *I = cast<Instruction>(V);
3819         assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
3820         InstructionCost Cost = TTI->getInstructionCost(I, CostKind);
3821         assert(Cost.isValid() && "Invalid instruction cost");
3822         ScalarCost += *(Cost.getValue());
3823       }
3824       // VecCost is equal to sum of the cost of creating 2 vectors
3825       // and the cost of creating shuffle.
3826       int VecCost = 0;
3827       if (Instruction::isBinaryOp(E->getOpcode())) {
3828         VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind);
3829         VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy,
3830                                                CostKind);
3831       } else {
3832         Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType();
3833         Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType();
3834         auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size());
3835         auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size());
3836         VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty,
3837                                         TTI::CastContextHint::None, CostKind);
3838         VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty,
3839                                          TTI::CastContextHint::None, CostKind);
3840       }
3841       VecCost += TTI->getShuffleCost(TargetTransformInfo::SK_Select, VecTy, 0);
3842       LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecCost, ScalarCost));
3843       return ReuseShuffleCost + VecCost - ScalarCost;
3844     }
3845     default:
3846       llvm_unreachable("Unknown instruction");
3847   }
3848 }
3849 
3850 bool BoUpSLP::isFullyVectorizableTinyTree() const {
3851   LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
3852                     << VectorizableTree.size() << " is fully vectorizable .\n");
3853 
3854   // We only handle trees of heights 1 and 2.
3855   if (VectorizableTree.size() == 1 &&
3856       VectorizableTree[0]->State == TreeEntry::Vectorize)
3857     return true;
3858 
3859   if (VectorizableTree.size() != 2)
3860     return false;
3861 
3862   // Handle splat and all-constants stores.
3863   if (VectorizableTree[0]->State == TreeEntry::Vectorize &&
3864       (allConstant(VectorizableTree[1]->Scalars) ||
3865        isSplat(VectorizableTree[1]->Scalars)))
3866     return true;
3867 
3868   // Gathering cost would be too much for tiny trees.
3869   if (VectorizableTree[0]->State == TreeEntry::NeedToGather ||
3870       VectorizableTree[1]->State == TreeEntry::NeedToGather)
3871     return false;
3872 
3873   return true;
3874 }
3875 
3876 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts,
3877                                        TargetTransformInfo *TTI) {
3878   // Look past the root to find a source value. Arbitrarily follow the
3879   // path through operand 0 of any 'or'. Also, peek through optional
3880   // shift-left-by-multiple-of-8-bits.
3881   Value *ZextLoad = Root;
3882   const APInt *ShAmtC;
3883   while (!isa<ConstantExpr>(ZextLoad) &&
3884          (match(ZextLoad, m_Or(m_Value(), m_Value())) ||
3885           (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) &&
3886            ShAmtC->urem(8) == 0)))
3887     ZextLoad = cast<BinaryOperator>(ZextLoad)->getOperand(0);
3888 
3889   // Check if the input is an extended load of the required or/shift expression.
3890   Value *LoadPtr;
3891   if (ZextLoad == Root || !match(ZextLoad, m_ZExt(m_Load(m_Value(LoadPtr)))))
3892     return false;
3893 
3894   // Require that the total load bit width is a legal integer type.
3895   // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target.
3896   // But <16 x i8> --> i128 is not, so the backend probably can't reduce it.
3897   Type *SrcTy = LoadPtr->getType()->getPointerElementType();
3898   unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts;
3899   if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth)))
3900     return false;
3901 
3902   // Everything matched - assume that we can fold the whole sequence using
3903   // load combining.
3904   LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at "
3905              << *(cast<Instruction>(Root)) << "\n");
3906 
3907   return true;
3908 }
3909 
3910 bool BoUpSLP::isLoadCombineReductionCandidate(unsigned RdxOpcode) const {
3911   if (RdxOpcode != Instruction::Or)
3912     return false;
3913 
3914   unsigned NumElts = VectorizableTree[0]->Scalars.size();
3915   Value *FirstReduced = VectorizableTree[0]->Scalars[0];
3916   return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI);
3917 }
3918 
3919 bool BoUpSLP::isLoadCombineCandidate() const {
3920   // Peek through a final sequence of stores and check if all operations are
3921   // likely to be load-combined.
3922   unsigned NumElts = VectorizableTree[0]->Scalars.size();
3923   for (Value *Scalar : VectorizableTree[0]->Scalars) {
3924     Value *X;
3925     if (!match(Scalar, m_Store(m_Value(X), m_Value())) ||
3926         !isLoadCombineCandidateImpl(X, NumElts, TTI))
3927       return false;
3928   }
3929   return true;
3930 }
3931 
3932 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() const {
3933   // We can vectorize the tree if its size is greater than or equal to the
3934   // minimum size specified by the MinTreeSize command line option.
3935   if (VectorizableTree.size() >= MinTreeSize)
3936     return false;
3937 
3938   // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
3939   // can vectorize it if we can prove it fully vectorizable.
3940   if (isFullyVectorizableTinyTree())
3941     return false;
3942 
3943   assert(VectorizableTree.empty()
3944              ? ExternalUses.empty()
3945              : true && "We shouldn't have any external users");
3946 
3947   // Otherwise, we can't vectorize the tree. It is both tiny and not fully
3948   // vectorizable.
3949   return true;
3950 }
3951 
3952 int BoUpSLP::getSpillCost() const {
3953   // Walk from the bottom of the tree to the top, tracking which values are
3954   // live. When we see a call instruction that is not part of our tree,
3955   // query TTI to see if there is a cost to keeping values live over it
3956   // (for example, if spills and fills are required).
3957   unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
3958   int Cost = 0;
3959 
3960   SmallPtrSet<Instruction*, 4> LiveValues;
3961   Instruction *PrevInst = nullptr;
3962 
3963   // The entries in VectorizableTree are not necessarily ordered by their
3964   // position in basic blocks. Collect them and order them by dominance so later
3965   // instructions are guaranteed to be visited first. For instructions in
3966   // different basic blocks, we only scan to the beginning of the block, so
3967   // their order does not matter, as long as all instructions in a basic block
3968   // are grouped together. Using dominance ensures a deterministic order.
3969   SmallVector<Instruction *, 16> OrderedScalars;
3970   for (const auto &TEPtr : VectorizableTree) {
3971     Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]);
3972     if (!Inst)
3973       continue;
3974     OrderedScalars.push_back(Inst);
3975   }
3976   llvm::stable_sort(OrderedScalars, [this](Instruction *A, Instruction *B) {
3977     return DT->dominates(B, A);
3978   });
3979 
3980   for (Instruction *Inst : OrderedScalars) {
3981     if (!PrevInst) {
3982       PrevInst = Inst;
3983       continue;
3984     }
3985 
3986     // Update LiveValues.
3987     LiveValues.erase(PrevInst);
3988     for (auto &J : PrevInst->operands()) {
3989       if (isa<Instruction>(&*J) && getTreeEntry(&*J))
3990         LiveValues.insert(cast<Instruction>(&*J));
3991     }
3992 
3993     LLVM_DEBUG({
3994       dbgs() << "SLP: #LV: " << LiveValues.size();
3995       for (auto *X : LiveValues)
3996         dbgs() << " " << X->getName();
3997       dbgs() << ", Looking at ";
3998       Inst->dump();
3999     });
4000 
4001     // Now find the sequence of instructions between PrevInst and Inst.
4002     unsigned NumCalls = 0;
4003     BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
4004                                  PrevInstIt =
4005                                      PrevInst->getIterator().getReverse();
4006     while (InstIt != PrevInstIt) {
4007       if (PrevInstIt == PrevInst->getParent()->rend()) {
4008         PrevInstIt = Inst->getParent()->rbegin();
4009         continue;
4010       }
4011 
4012       // Debug information does not impact spill cost.
4013       if ((isa<CallInst>(&*PrevInstIt) &&
4014            !isa<DbgInfoIntrinsic>(&*PrevInstIt)) &&
4015           &*PrevInstIt != PrevInst)
4016         NumCalls++;
4017 
4018       ++PrevInstIt;
4019     }
4020 
4021     if (NumCalls) {
4022       SmallVector<Type*, 4> V;
4023       for (auto *II : LiveValues)
4024         V.push_back(FixedVectorType::get(II->getType(), BundleWidth));
4025       Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
4026     }
4027 
4028     PrevInst = Inst;
4029   }
4030 
4031   return Cost;
4032 }
4033 
4034 int BoUpSLP::getTreeCost() {
4035   int Cost = 0;
4036   LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
4037                     << VectorizableTree.size() << ".\n");
4038 
4039   unsigned BundleWidth = VectorizableTree[0]->Scalars.size();
4040 
4041   for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
4042     TreeEntry &TE = *VectorizableTree[I].get();
4043 
4044     // We create duplicate tree entries for gather sequences that have multiple
4045     // uses. However, we should not compute the cost of duplicate sequences.
4046     // For example, if we have a build vector (i.e., insertelement sequence)
4047     // that is used by more than one vector instruction, we only need to
4048     // compute the cost of the insertelement instructions once. The redundant
4049     // instructions will be eliminated by CSE.
4050     //
4051     // We should consider not creating duplicate tree entries for gather
4052     // sequences, and instead add additional edges to the tree representing
4053     // their uses. Since such an approach results in fewer total entries,
4054     // existing heuristics based on tree size may yield different results.
4055     //
4056     if (TE.State == TreeEntry::NeedToGather &&
4057         std::any_of(std::next(VectorizableTree.begin(), I + 1),
4058                     VectorizableTree.end(),
4059                     [TE](const std::unique_ptr<TreeEntry> &EntryPtr) {
4060                       return EntryPtr->State == TreeEntry::NeedToGather &&
4061                              EntryPtr->isSame(TE.Scalars);
4062                     }))
4063       continue;
4064 
4065     int C = getEntryCost(&TE);
4066     Cost += C;
4067     LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
4068                       << " for bundle that starts with " << *TE.Scalars[0]
4069                       << ".\n"
4070                       << "SLP: Current total cost = " << Cost << "\n");
4071   }
4072 
4073   SmallPtrSet<Value *, 16> ExtractCostCalculated;
4074   int ExtractCost = 0;
4075   for (ExternalUser &EU : ExternalUses) {
4076     // We only add extract cost once for the same scalar.
4077     if (!ExtractCostCalculated.insert(EU.Scalar).second)
4078       continue;
4079 
4080     // Uses by ephemeral values are free (because the ephemeral value will be
4081     // removed prior to code generation, and so the extraction will be
4082     // removed as well).
4083     if (EphValues.count(EU.User))
4084       continue;
4085 
4086     // If we plan to rewrite the tree in a smaller type, we will need to sign
4087     // extend the extracted value back to the original type. Here, we account
4088     // for the extract and the added cost of the sign extend if needed.
4089     auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth);
4090     auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
4091     if (MinBWs.count(ScalarRoot)) {
4092       auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
4093       auto Extend =
4094           MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
4095       VecTy = FixedVectorType::get(MinTy, BundleWidth);
4096       ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
4097                                                    VecTy, EU.Lane);
4098     } else {
4099       ExtractCost +=
4100           TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
4101     }
4102   }
4103 
4104   int SpillCost = getSpillCost();
4105   Cost += SpillCost + ExtractCost;
4106 
4107 #ifndef NDEBUG
4108   SmallString<256> Str;
4109   {
4110     raw_svector_ostream OS(Str);
4111     OS << "SLP: Spill Cost = " << SpillCost << ".\n"
4112        << "SLP: Extract Cost = " << ExtractCost << ".\n"
4113        << "SLP: Total Cost = " << Cost << ".\n";
4114   }
4115   LLVM_DEBUG(dbgs() << Str);
4116   if (ViewSLPTree)
4117     ViewGraph(this, "SLP" + F->getName(), false, Str);
4118 #endif
4119 
4120   return Cost;
4121 }
4122 
4123 int BoUpSLP::getGatherCost(FixedVectorType *Ty,
4124                            const DenseSet<unsigned> &ShuffledIndices) const {
4125   unsigned NumElts = Ty->getNumElements();
4126   APInt DemandedElts = APInt::getNullValue(NumElts);
4127   for (unsigned I = 0; I < NumElts; ++I)
4128     if (!ShuffledIndices.count(I))
4129       DemandedElts.setBit(I);
4130   int Cost = TTI->getScalarizationOverhead(Ty, DemandedElts, /*Insert*/ true,
4131                                            /*Extract*/ false);
4132   if (!ShuffledIndices.empty())
4133     Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty);
4134   return Cost;
4135 }
4136 
4137 int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const {
4138   // Find the type of the operands in VL.
4139   Type *ScalarTy = VL[0]->getType();
4140   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
4141     ScalarTy = SI->getValueOperand()->getType();
4142   auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
4143   // Find the cost of inserting/extracting values from the vector.
4144   // Check if the same elements are inserted several times and count them as
4145   // shuffle candidates.
4146   DenseSet<unsigned> ShuffledElements;
4147   DenseSet<Value *> UniqueElements;
4148   // Iterate in reverse order to consider insert elements with the high cost.
4149   for (unsigned I = VL.size(); I > 0; --I) {
4150     unsigned Idx = I - 1;
4151     if (!UniqueElements.insert(VL[Idx]).second)
4152       ShuffledElements.insert(Idx);
4153   }
4154   return getGatherCost(VecTy, ShuffledElements);
4155 }
4156 
4157 // Perform operand reordering on the instructions in VL and return the reordered
4158 // operands in Left and Right.
4159 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
4160                                              SmallVectorImpl<Value *> &Left,
4161                                              SmallVectorImpl<Value *> &Right,
4162                                              const DataLayout &DL,
4163                                              ScalarEvolution &SE,
4164                                              const BoUpSLP &R) {
4165   if (VL.empty())
4166     return;
4167   VLOperands Ops(VL, DL, SE, R);
4168   // Reorder the operands in place.
4169   Ops.reorder();
4170   Left = Ops.getVL(0);
4171   Right = Ops.getVL(1);
4172 }
4173 
4174 void BoUpSLP::setInsertPointAfterBundle(TreeEntry *E) {
4175   // Get the basic block this bundle is in. All instructions in the bundle
4176   // should be in this block.
4177   auto *Front = E->getMainOp();
4178   auto *BB = Front->getParent();
4179   assert(llvm::all_of(make_range(E->Scalars.begin(), E->Scalars.end()),
4180                       [=](Value *V) -> bool {
4181                         auto *I = cast<Instruction>(V);
4182                         return !E->isOpcodeOrAlt(I) || I->getParent() == BB;
4183                       }));
4184 
4185   // The last instruction in the bundle in program order.
4186   Instruction *LastInst = nullptr;
4187 
4188   // Find the last instruction. The common case should be that BB has been
4189   // scheduled, and the last instruction is VL.back(). So we start with
4190   // VL.back() and iterate over schedule data until we reach the end of the
4191   // bundle. The end of the bundle is marked by null ScheduleData.
4192   if (BlocksSchedules.count(BB)) {
4193     auto *Bundle =
4194         BlocksSchedules[BB]->getScheduleData(E->isOneOf(E->Scalars.back()));
4195     if (Bundle && Bundle->isPartOfBundle())
4196       for (; Bundle; Bundle = Bundle->NextInBundle)
4197         if (Bundle->OpValue == Bundle->Inst)
4198           LastInst = Bundle->Inst;
4199   }
4200 
4201   // LastInst can still be null at this point if there's either not an entry
4202   // for BB in BlocksSchedules or there's no ScheduleData available for
4203   // VL.back(). This can be the case if buildTree_rec aborts for various
4204   // reasons (e.g., the maximum recursion depth is reached, the maximum region
4205   // size is reached, etc.). ScheduleData is initialized in the scheduling
4206   // "dry-run".
4207   //
4208   // If this happens, we can still find the last instruction by brute force. We
4209   // iterate forwards from Front (inclusive) until we either see all
4210   // instructions in the bundle or reach the end of the block. If Front is the
4211   // last instruction in program order, LastInst will be set to Front, and we
4212   // will visit all the remaining instructions in the block.
4213   //
4214   // One of the reasons we exit early from buildTree_rec is to place an upper
4215   // bound on compile-time. Thus, taking an additional compile-time hit here is
4216   // not ideal. However, this should be exceedingly rare since it requires that
4217   // we both exit early from buildTree_rec and that the bundle be out-of-order
4218   // (causing us to iterate all the way to the end of the block).
4219   if (!LastInst) {
4220     SmallPtrSet<Value *, 16> Bundle(E->Scalars.begin(), E->Scalars.end());
4221     for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
4222       if (Bundle.erase(&I) && E->isOpcodeOrAlt(&I))
4223         LastInst = &I;
4224       if (Bundle.empty())
4225         break;
4226     }
4227   }
4228   assert(LastInst && "Failed to find last instruction in bundle");
4229 
4230   // Set the insertion point after the last instruction in the bundle. Set the
4231   // debug location to Front.
4232   Builder.SetInsertPoint(BB, ++LastInst->getIterator());
4233   Builder.SetCurrentDebugLocation(Front->getDebugLoc());
4234 }
4235 
4236 Value *BoUpSLP::gather(ArrayRef<Value *> VL) {
4237   Value *Val0 =
4238       isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0];
4239   FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size());
4240   Value *Vec = UndefValue::get(VecTy);
4241   unsigned InsIndex = 0;
4242   for (Value *Val : VL) {
4243     Vec = Builder.CreateInsertElement(Vec, Val, Builder.getInt32(InsIndex++));
4244     auto *InsElt = dyn_cast<InsertElementInst>(Vec);
4245     if (!InsElt)
4246       continue;
4247     GatherSeq.insert(InsElt);
4248     CSEBlocks.insert(InsElt->getParent());
4249     // Add to our 'need-to-extract' list.
4250     if (TreeEntry *Entry = getTreeEntry(Val)) {
4251       // Find which lane we need to extract.
4252       unsigned FoundLane = std::distance(Entry->Scalars.begin(),
4253                                          find(Entry->Scalars, Val));
4254       assert(FoundLane < Entry->Scalars.size() && "Couldn't find extract lane");
4255       if (!Entry->ReuseShuffleIndices.empty()) {
4256         FoundLane = std::distance(Entry->ReuseShuffleIndices.begin(),
4257                                   find(Entry->ReuseShuffleIndices, FoundLane));
4258       }
4259       ExternalUses.push_back(ExternalUser(Val, InsElt, FoundLane));
4260     }
4261   }
4262 
4263   return Vec;
4264 }
4265 
4266 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
4267   InstructionsState S = getSameOpcode(VL);
4268   if (S.getOpcode()) {
4269     if (TreeEntry *E = getTreeEntry(S.OpValue)) {
4270       if (E->isSame(VL)) {
4271         Value *V = vectorizeTree(E);
4272         if (VL.size() == E->Scalars.size() && !E->ReuseShuffleIndices.empty()) {
4273           // We need to get the vectorized value but without shuffle.
4274           if (auto *SV = dyn_cast<ShuffleVectorInst>(V)) {
4275             V = SV->getOperand(0);
4276           } else {
4277             // Reshuffle to get only unique values.
4278             SmallVector<int, 4> UniqueIdxs;
4279             SmallSet<int, 4> UsedIdxs;
4280             for (int Idx : E->ReuseShuffleIndices)
4281               if (UsedIdxs.insert(Idx).second)
4282                 UniqueIdxs.emplace_back(Idx);
4283             V = Builder.CreateShuffleVector(V, UniqueIdxs);
4284           }
4285         }
4286         return V;
4287       }
4288     }
4289   }
4290 
4291   // Check that every instruction appears once in this bundle.
4292   SmallVector<int, 4> ReuseShuffleIndicies;
4293   SmallVector<Value *, 4> UniqueValues;
4294   if (VL.size() > 2) {
4295     DenseMap<Value *, unsigned> UniquePositions;
4296     for (Value *V : VL) {
4297       auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
4298       ReuseShuffleIndicies.emplace_back(Res.first->second);
4299       if (Res.second || isa<Constant>(V))
4300         UniqueValues.emplace_back(V);
4301     }
4302     // Do not shuffle single element or if number of unique values is not power
4303     // of 2.
4304     if (UniqueValues.size() == VL.size() || UniqueValues.size() <= 1 ||
4305         !llvm::isPowerOf2_32(UniqueValues.size()))
4306       ReuseShuffleIndicies.clear();
4307     else
4308       VL = UniqueValues;
4309   }
4310 
4311   Value *Vec = gather(VL);
4312   if (!ReuseShuffleIndicies.empty()) {
4313     Vec = Builder.CreateShuffleVector(Vec, ReuseShuffleIndicies, "shuffle");
4314     if (auto *I = dyn_cast<Instruction>(Vec)) {
4315       GatherSeq.insert(I);
4316       CSEBlocks.insert(I->getParent());
4317     }
4318   }
4319   return Vec;
4320 }
4321 
4322 namespace {
4323 /// Merges shuffle masks and emits final shuffle instruction, if required.
4324 class ShuffleInstructionBuilder {
4325   IRBuilderBase &Builder;
4326   bool IsFinalized = false;
4327   SmallVector<int, 4> Mask;
4328 
4329 public:
4330   ShuffleInstructionBuilder(IRBuilderBase &Builder) : Builder(Builder) {}
4331 
4332   /// Adds a mask, inverting it before applying.
4333   void addInversedMask(ArrayRef<unsigned> SubMask) {
4334     if (SubMask.empty())
4335       return;
4336     SmallVector<int, 4> NewMask;
4337     inversePermutation(SubMask, NewMask);
4338     addMask(NewMask);
4339   }
4340 
4341   /// Functions adds masks, merging them into  single one.
4342   void addMask(ArrayRef<unsigned> SubMask) {
4343     SmallVector<int, 4> NewMask(SubMask.begin(), SubMask.end());
4344     addMask(NewMask);
4345   }
4346 
4347   void addMask(ArrayRef<int> SubMask) {
4348     if (SubMask.empty())
4349       return;
4350     if (Mask.empty()) {
4351       Mask.append(SubMask.begin(), SubMask.end());
4352       return;
4353     }
4354     SmallVector<int, 4> NewMask(SubMask.size(), SubMask.size());
4355     int TermValue = std::min(Mask.size(), SubMask.size());
4356     for (int I = 0, E = SubMask.size(); I < E; ++I) {
4357       if (SubMask[I] >= TermValue || Mask[SubMask[I]] >= TermValue) {
4358         NewMask[I] = E;
4359         continue;
4360       }
4361       NewMask[I] = Mask[SubMask[I]];
4362     }
4363     Mask.swap(NewMask);
4364   }
4365 
4366   Value *finalize(Value *V) {
4367     IsFinalized = true;
4368     if (Mask.empty())
4369       return V;
4370     return Builder.CreateShuffleVector(V, Mask, "shuffle");
4371   }
4372 
4373   ~ShuffleInstructionBuilder() {
4374     assert((IsFinalized || Mask.empty()) &&
4375            "Must be finalized construction of the shuffles.");
4376   }
4377 };
4378 } // namespace
4379 
4380 Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
4381   IRBuilder<>::InsertPointGuard Guard(Builder);
4382 
4383   if (E->VectorizedValue) {
4384     LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
4385     return E->VectorizedValue;
4386   }
4387 
4388   ShuffleInstructionBuilder ShuffleBuilder(Builder);
4389   bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
4390   if (E->State == TreeEntry::NeedToGather) {
4391     setInsertPointAfterBundle(E);
4392     Value *Vec = gather(E->Scalars);
4393     if (NeedToShuffleReuses) {
4394       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4395       Vec = ShuffleBuilder.finalize(Vec);
4396       if (auto *I = dyn_cast<Instruction>(Vec)) {
4397         GatherSeq.insert(I);
4398         CSEBlocks.insert(I->getParent());
4399       }
4400     }
4401     E->VectorizedValue = Vec;
4402     return Vec;
4403   }
4404 
4405   assert((E->State == TreeEntry::Vectorize ||
4406           E->State == TreeEntry::ScatterVectorize) &&
4407          "Unhandled state");
4408   unsigned ShuffleOrOp =
4409       E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
4410   Instruction *VL0 = E->getMainOp();
4411   Type *ScalarTy = VL0->getType();
4412   if (auto *Store = dyn_cast<StoreInst>(VL0))
4413     ScalarTy = Store->getValueOperand()->getType();
4414   auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size());
4415   switch (ShuffleOrOp) {
4416     case Instruction::PHI: {
4417       auto *PH = cast<PHINode>(VL0);
4418       Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
4419       Builder.SetCurrentDebugLocation(PH->getDebugLoc());
4420       PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
4421       Value *V = NewPhi;
4422       if (NeedToShuffleReuses)
4423         V = Builder.CreateShuffleVector(V, E->ReuseShuffleIndices, "shuffle");
4424 
4425       E->VectorizedValue = V;
4426 
4427       // PHINodes may have multiple entries from the same block. We want to
4428       // visit every block once.
4429       SmallPtrSet<BasicBlock*, 4> VisitedBBs;
4430 
4431       for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
4432         ValueList Operands;
4433         BasicBlock *IBB = PH->getIncomingBlock(i);
4434 
4435         if (!VisitedBBs.insert(IBB).second) {
4436           NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
4437           continue;
4438         }
4439 
4440         Builder.SetInsertPoint(IBB->getTerminator());
4441         Builder.SetCurrentDebugLocation(PH->getDebugLoc());
4442         Value *Vec = vectorizeTree(E->getOperand(i));
4443         NewPhi->addIncoming(Vec, IBB);
4444       }
4445 
4446       assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
4447              "Invalid number of incoming values");
4448       return V;
4449     }
4450 
4451     case Instruction::ExtractElement: {
4452       Value *V = E->getSingleOperand(0);
4453       Builder.SetInsertPoint(VL0);
4454       ShuffleBuilder.addInversedMask(E->ReorderIndices);
4455       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4456       V = ShuffleBuilder.finalize(V);
4457       E->VectorizedValue = V;
4458       return V;
4459     }
4460     case Instruction::ExtractValue: {
4461       auto *LI = cast<LoadInst>(E->getSingleOperand(0));
4462       Builder.SetInsertPoint(LI);
4463       auto *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
4464       Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
4465       LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign());
4466       Value *NewV = propagateMetadata(V, E->Scalars);
4467       ShuffleBuilder.addInversedMask(E->ReorderIndices);
4468       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4469       NewV = ShuffleBuilder.finalize(NewV);
4470       E->VectorizedValue = NewV;
4471       return NewV;
4472     }
4473     case Instruction::ZExt:
4474     case Instruction::SExt:
4475     case Instruction::FPToUI:
4476     case Instruction::FPToSI:
4477     case Instruction::FPExt:
4478     case Instruction::PtrToInt:
4479     case Instruction::IntToPtr:
4480     case Instruction::SIToFP:
4481     case Instruction::UIToFP:
4482     case Instruction::Trunc:
4483     case Instruction::FPTrunc:
4484     case Instruction::BitCast: {
4485       setInsertPointAfterBundle(E);
4486 
4487       Value *InVec = vectorizeTree(E->getOperand(0));
4488 
4489       if (E->VectorizedValue) {
4490         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
4491         return E->VectorizedValue;
4492       }
4493 
4494       auto *CI = cast<CastInst>(VL0);
4495       Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
4496       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4497       V = ShuffleBuilder.finalize(V);
4498 
4499       E->VectorizedValue = V;
4500       ++NumVectorInstructions;
4501       return V;
4502     }
4503     case Instruction::FCmp:
4504     case Instruction::ICmp: {
4505       setInsertPointAfterBundle(E);
4506 
4507       Value *L = vectorizeTree(E->getOperand(0));
4508       Value *R = vectorizeTree(E->getOperand(1));
4509 
4510       if (E->VectorizedValue) {
4511         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
4512         return E->VectorizedValue;
4513       }
4514 
4515       CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
4516       Value *V = Builder.CreateCmp(P0, L, R);
4517       propagateIRFlags(V, E->Scalars, VL0);
4518       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4519       V = ShuffleBuilder.finalize(V);
4520 
4521       E->VectorizedValue = V;
4522       ++NumVectorInstructions;
4523       return V;
4524     }
4525     case Instruction::Select: {
4526       setInsertPointAfterBundle(E);
4527 
4528       Value *Cond = vectorizeTree(E->getOperand(0));
4529       Value *True = vectorizeTree(E->getOperand(1));
4530       Value *False = vectorizeTree(E->getOperand(2));
4531 
4532       if (E->VectorizedValue) {
4533         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
4534         return E->VectorizedValue;
4535       }
4536 
4537       Value *V = Builder.CreateSelect(Cond, True, False);
4538       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4539       V = ShuffleBuilder.finalize(V);
4540 
4541       E->VectorizedValue = V;
4542       ++NumVectorInstructions;
4543       return V;
4544     }
4545     case Instruction::FNeg: {
4546       setInsertPointAfterBundle(E);
4547 
4548       Value *Op = vectorizeTree(E->getOperand(0));
4549 
4550       if (E->VectorizedValue) {
4551         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
4552         return E->VectorizedValue;
4553       }
4554 
4555       Value *V = Builder.CreateUnOp(
4556           static_cast<Instruction::UnaryOps>(E->getOpcode()), Op);
4557       propagateIRFlags(V, E->Scalars, VL0);
4558       if (auto *I = dyn_cast<Instruction>(V))
4559         V = propagateMetadata(I, E->Scalars);
4560 
4561       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4562       V = ShuffleBuilder.finalize(V);
4563 
4564       E->VectorizedValue = V;
4565       ++NumVectorInstructions;
4566 
4567       return V;
4568     }
4569     case Instruction::Add:
4570     case Instruction::FAdd:
4571     case Instruction::Sub:
4572     case Instruction::FSub:
4573     case Instruction::Mul:
4574     case Instruction::FMul:
4575     case Instruction::UDiv:
4576     case Instruction::SDiv:
4577     case Instruction::FDiv:
4578     case Instruction::URem:
4579     case Instruction::SRem:
4580     case Instruction::FRem:
4581     case Instruction::Shl:
4582     case Instruction::LShr:
4583     case Instruction::AShr:
4584     case Instruction::And:
4585     case Instruction::Or:
4586     case Instruction::Xor: {
4587       setInsertPointAfterBundle(E);
4588 
4589       Value *LHS = vectorizeTree(E->getOperand(0));
4590       Value *RHS = vectorizeTree(E->getOperand(1));
4591 
4592       if (E->VectorizedValue) {
4593         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
4594         return E->VectorizedValue;
4595       }
4596 
4597       Value *V = Builder.CreateBinOp(
4598           static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS,
4599           RHS);
4600       propagateIRFlags(V, E->Scalars, VL0);
4601       if (auto *I = dyn_cast<Instruction>(V))
4602         V = propagateMetadata(I, E->Scalars);
4603 
4604       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4605       V = ShuffleBuilder.finalize(V);
4606 
4607       E->VectorizedValue = V;
4608       ++NumVectorInstructions;
4609 
4610       return V;
4611     }
4612     case Instruction::Load: {
4613       // Loads are inserted at the head of the tree because we don't want to
4614       // sink them all the way down past store instructions.
4615       bool IsReorder = E->updateStateIfReorder();
4616       if (IsReorder)
4617         VL0 = E->getMainOp();
4618       setInsertPointAfterBundle(E);
4619 
4620       LoadInst *LI = cast<LoadInst>(VL0);
4621       Instruction *NewLI;
4622       unsigned AS = LI->getPointerAddressSpace();
4623       Value *PO = LI->getPointerOperand();
4624       if (E->State == TreeEntry::Vectorize) {
4625 
4626         Value *VecPtr = Builder.CreateBitCast(PO, VecTy->getPointerTo(AS));
4627 
4628         // The pointer operand uses an in-tree scalar so we add the new BitCast
4629         // to ExternalUses list to make sure that an extract will be generated
4630         // in the future.
4631         if (getTreeEntry(PO))
4632           ExternalUses.emplace_back(PO, cast<User>(VecPtr), 0);
4633 
4634         NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign());
4635       } else {
4636         assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state");
4637         Value *VecPtr = vectorizeTree(E->getOperand(0));
4638         // Use the minimum alignment of the gathered loads.
4639         Align CommonAlignment = LI->getAlign();
4640         for (Value *V : E->Scalars)
4641           CommonAlignment =
4642               commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign());
4643         NewLI = Builder.CreateMaskedGather(VecPtr, CommonAlignment);
4644       }
4645       Value *V = propagateMetadata(NewLI, E->Scalars);
4646 
4647       ShuffleBuilder.addInversedMask(E->ReorderIndices);
4648       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4649       V = ShuffleBuilder.finalize(V);
4650       E->VectorizedValue = V;
4651       ++NumVectorInstructions;
4652       return V;
4653     }
4654     case Instruction::Store: {
4655       bool IsReorder = !E->ReorderIndices.empty();
4656       auto *SI = cast<StoreInst>(
4657           IsReorder ? E->Scalars[E->ReorderIndices.front()] : VL0);
4658       unsigned AS = SI->getPointerAddressSpace();
4659 
4660       setInsertPointAfterBundle(E);
4661 
4662       Value *VecValue = vectorizeTree(E->getOperand(0));
4663       ShuffleBuilder.addMask(E->ReorderIndices);
4664       VecValue = ShuffleBuilder.finalize(VecValue);
4665 
4666       Value *ScalarPtr = SI->getPointerOperand();
4667       Value *VecPtr = Builder.CreateBitCast(
4668           ScalarPtr, VecValue->getType()->getPointerTo(AS));
4669       StoreInst *ST = Builder.CreateAlignedStore(VecValue, VecPtr,
4670                                                  SI->getAlign());
4671 
4672       // The pointer operand uses an in-tree scalar, so add the new BitCast to
4673       // ExternalUses to make sure that an extract will be generated in the
4674       // future.
4675       if (getTreeEntry(ScalarPtr))
4676         ExternalUses.push_back(ExternalUser(ScalarPtr, cast<User>(VecPtr), 0));
4677 
4678       Value *V = propagateMetadata(ST, E->Scalars);
4679 
4680       E->VectorizedValue = V;
4681       ++NumVectorInstructions;
4682       return V;
4683     }
4684     case Instruction::GetElementPtr: {
4685       setInsertPointAfterBundle(E);
4686 
4687       Value *Op0 = vectorizeTree(E->getOperand(0));
4688 
4689       std::vector<Value *> OpVecs;
4690       for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
4691            ++j) {
4692         ValueList &VL = E->getOperand(j);
4693         // Need to cast all elements to the same type before vectorization to
4694         // avoid crash.
4695         Type *VL0Ty = VL0->getOperand(j)->getType();
4696         Type *Ty = llvm::all_of(
4697                        VL, [VL0Ty](Value *V) { return VL0Ty == V->getType(); })
4698                        ? VL0Ty
4699                        : DL->getIndexType(cast<GetElementPtrInst>(VL0)
4700                                               ->getPointerOperandType()
4701                                               ->getScalarType());
4702         for (Value *&V : VL) {
4703           auto *CI = cast<ConstantInt>(V);
4704           V = ConstantExpr::getIntegerCast(CI, Ty,
4705                                            CI->getValue().isSignBitSet());
4706         }
4707         Value *OpVec = vectorizeTree(VL);
4708         OpVecs.push_back(OpVec);
4709       }
4710 
4711       Value *V = Builder.CreateGEP(
4712           cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs);
4713       if (Instruction *I = dyn_cast<Instruction>(V))
4714         V = propagateMetadata(I, E->Scalars);
4715 
4716       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4717       V = ShuffleBuilder.finalize(V);
4718 
4719       E->VectorizedValue = V;
4720       ++NumVectorInstructions;
4721 
4722       return V;
4723     }
4724     case Instruction::Call: {
4725       CallInst *CI = cast<CallInst>(VL0);
4726       setInsertPointAfterBundle(E);
4727 
4728       Intrinsic::ID IID  = Intrinsic::not_intrinsic;
4729       if (Function *FI = CI->getCalledFunction())
4730         IID = FI->getIntrinsicID();
4731 
4732       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4733 
4734       auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
4735       bool UseIntrinsic = ID != Intrinsic::not_intrinsic &&
4736                           VecCallCosts.first <= VecCallCosts.second;
4737 
4738       Value *ScalarArg = nullptr;
4739       std::vector<Value *> OpVecs;
4740       for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
4741         ValueList OpVL;
4742         // Some intrinsics have scalar arguments. This argument should not be
4743         // vectorized.
4744         if (UseIntrinsic && hasVectorInstrinsicScalarOpd(IID, j)) {
4745           CallInst *CEI = cast<CallInst>(VL0);
4746           ScalarArg = CEI->getArgOperand(j);
4747           OpVecs.push_back(CEI->getArgOperand(j));
4748           continue;
4749         }
4750 
4751         Value *OpVec = vectorizeTree(E->getOperand(j));
4752         LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
4753         OpVecs.push_back(OpVec);
4754       }
4755 
4756       Function *CF;
4757       if (!UseIntrinsic) {
4758         VFShape Shape =
4759             VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
4760                                   VecTy->getNumElements())),
4761                          false /*HasGlobalPred*/);
4762         CF = VFDatabase(*CI).getVectorizedFunction(Shape);
4763       } else {
4764         Type *Tys[] = {FixedVectorType::get(CI->getType(), E->Scalars.size())};
4765         CF = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
4766       }
4767 
4768       SmallVector<OperandBundleDef, 1> OpBundles;
4769       CI->getOperandBundlesAsDefs(OpBundles);
4770       Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);
4771 
4772       // The scalar argument uses an in-tree scalar so we add the new vectorized
4773       // call to ExternalUses list to make sure that an extract will be
4774       // generated in the future.
4775       if (ScalarArg && getTreeEntry(ScalarArg))
4776         ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));
4777 
4778       propagateIRFlags(V, E->Scalars, VL0);
4779       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4780       V = ShuffleBuilder.finalize(V);
4781 
4782       E->VectorizedValue = V;
4783       ++NumVectorInstructions;
4784       return V;
4785     }
4786     case Instruction::ShuffleVector: {
4787       assert(E->isAltShuffle() &&
4788              ((Instruction::isBinaryOp(E->getOpcode()) &&
4789                Instruction::isBinaryOp(E->getAltOpcode())) ||
4790               (Instruction::isCast(E->getOpcode()) &&
4791                Instruction::isCast(E->getAltOpcode()))) &&
4792              "Invalid Shuffle Vector Operand");
4793 
4794       Value *LHS = nullptr, *RHS = nullptr;
4795       if (Instruction::isBinaryOp(E->getOpcode())) {
4796         setInsertPointAfterBundle(E);
4797         LHS = vectorizeTree(E->getOperand(0));
4798         RHS = vectorizeTree(E->getOperand(1));
4799       } else {
4800         setInsertPointAfterBundle(E);
4801         LHS = vectorizeTree(E->getOperand(0));
4802       }
4803 
4804       if (E->VectorizedValue) {
4805         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
4806         return E->VectorizedValue;
4807       }
4808 
4809       Value *V0, *V1;
4810       if (Instruction::isBinaryOp(E->getOpcode())) {
4811         V0 = Builder.CreateBinOp(
4812             static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
4813         V1 = Builder.CreateBinOp(
4814             static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS);
4815       } else {
4816         V0 = Builder.CreateCast(
4817             static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy);
4818         V1 = Builder.CreateCast(
4819             static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy);
4820       }
4821 
4822       // Create shuffle to take alternate operations from the vector.
4823       // Also, gather up main and alt scalar ops to propagate IR flags to
4824       // each vector operation.
4825       ValueList OpScalars, AltScalars;
4826       unsigned e = E->Scalars.size();
4827       SmallVector<int, 8> Mask(e);
4828       for (unsigned i = 0; i < e; ++i) {
4829         auto *OpInst = cast<Instruction>(E->Scalars[i]);
4830         assert(E->isOpcodeOrAlt(OpInst) && "Unexpected main/alternate opcode");
4831         if (OpInst->getOpcode() == E->getAltOpcode()) {
4832           Mask[i] = e + i;
4833           AltScalars.push_back(E->Scalars[i]);
4834         } else {
4835           Mask[i] = i;
4836           OpScalars.push_back(E->Scalars[i]);
4837         }
4838       }
4839 
4840       propagateIRFlags(V0, OpScalars);
4841       propagateIRFlags(V1, AltScalars);
4842 
4843       Value *V = Builder.CreateShuffleVector(V0, V1, Mask);
4844       if (Instruction *I = dyn_cast<Instruction>(V))
4845         V = propagateMetadata(I, E->Scalars);
4846       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
4847       V = ShuffleBuilder.finalize(V);
4848 
4849       E->VectorizedValue = V;
4850       ++NumVectorInstructions;
4851 
4852       return V;
4853     }
4854     default:
4855     llvm_unreachable("unknown inst");
4856   }
4857   return nullptr;
4858 }
4859 
4860 Value *BoUpSLP::vectorizeTree() {
4861   ExtraValueToDebugLocsMap ExternallyUsedValues;
4862   return vectorizeTree(ExternallyUsedValues);
4863 }
4864 
4865 Value *
4866 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
4867   // All blocks must be scheduled before any instructions are inserted.
4868   for (auto &BSIter : BlocksSchedules) {
4869     scheduleBlock(BSIter.second.get());
4870   }
4871 
4872   Builder.SetInsertPoint(&F->getEntryBlock().front());
4873   auto *VectorRoot = vectorizeTree(VectorizableTree[0].get());
4874 
4875   // If the vectorized tree can be rewritten in a smaller type, we truncate the
4876   // vectorized root. InstCombine will then rewrite the entire expression. We
4877   // sign extend the extracted values below.
4878   auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
4879   if (MinBWs.count(ScalarRoot)) {
4880     if (auto *I = dyn_cast<Instruction>(VectorRoot))
4881       Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
4882     auto BundleWidth = VectorizableTree[0]->Scalars.size();
4883     auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
4884     auto *VecTy = FixedVectorType::get(MinTy, BundleWidth);
4885     auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
4886     VectorizableTree[0]->VectorizedValue = Trunc;
4887   }
4888 
4889   LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
4890                     << " values .\n");
4891 
4892   // If necessary, sign-extend or zero-extend ScalarRoot to the larger type
4893   // specified by ScalarType.
4894   auto extend = [&](Value *ScalarRoot, Value *Ex, Type *ScalarType) {
4895     if (!MinBWs.count(ScalarRoot))
4896       return Ex;
4897     if (MinBWs[ScalarRoot].second)
4898       return Builder.CreateSExt(Ex, ScalarType);
4899     return Builder.CreateZExt(Ex, ScalarType);
4900   };
4901 
4902   // Extract all of the elements with the external uses.
4903   for (const auto &ExternalUse : ExternalUses) {
4904     Value *Scalar = ExternalUse.Scalar;
4905     llvm::User *User = ExternalUse.User;
4906 
4907     // Skip users that we already RAUW. This happens when one instruction
4908     // has multiple uses of the same value.
4909     if (User && !is_contained(Scalar->users(), User))
4910       continue;
4911     TreeEntry *E = getTreeEntry(Scalar);
4912     assert(E && "Invalid scalar");
4913     assert(E->State != TreeEntry::NeedToGather &&
4914            "Extracting from a gather list");
4915 
4916     Value *Vec = E->VectorizedValue;
4917     assert(Vec && "Can't find vectorizable value");
4918 
4919     Value *Lane = Builder.getInt32(ExternalUse.Lane);
4920     // If User == nullptr, the Scalar is used as extra arg. Generate
4921     // ExtractElement instruction and update the record for this scalar in
4922     // ExternallyUsedValues.
4923     if (!User) {
4924       assert(ExternallyUsedValues.count(Scalar) &&
4925              "Scalar with nullptr as an external user must be registered in "
4926              "ExternallyUsedValues map");
4927       if (auto *VecI = dyn_cast<Instruction>(Vec)) {
4928         Builder.SetInsertPoint(VecI->getParent(),
4929                                std::next(VecI->getIterator()));
4930       } else {
4931         Builder.SetInsertPoint(&F->getEntryBlock().front());
4932       }
4933       Value *Ex = Builder.CreateExtractElement(Vec, Lane);
4934       Ex = extend(ScalarRoot, Ex, Scalar->getType());
4935       CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
4936       auto &Locs = ExternallyUsedValues[Scalar];
4937       ExternallyUsedValues.insert({Ex, Locs});
4938       ExternallyUsedValues.erase(Scalar);
4939       // Required to update internally referenced instructions.
4940       Scalar->replaceAllUsesWith(Ex);
4941       continue;
4942     }
4943 
4944     // Generate extracts for out-of-tree users.
4945     // Find the insertion point for the extractelement lane.
4946     if (auto *VecI = dyn_cast<Instruction>(Vec)) {
4947       if (PHINode *PH = dyn_cast<PHINode>(User)) {
4948         for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
4949           if (PH->getIncomingValue(i) == Scalar) {
4950             Instruction *IncomingTerminator =
4951                 PH->getIncomingBlock(i)->getTerminator();
4952             if (isa<CatchSwitchInst>(IncomingTerminator)) {
4953               Builder.SetInsertPoint(VecI->getParent(),
4954                                      std::next(VecI->getIterator()));
4955             } else {
4956               Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
4957             }
4958             Value *Ex = Builder.CreateExtractElement(Vec, Lane);
4959             Ex = extend(ScalarRoot, Ex, Scalar->getType());
4960             CSEBlocks.insert(PH->getIncomingBlock(i));
4961             PH->setOperand(i, Ex);
4962           }
4963         }
4964       } else {
4965         Builder.SetInsertPoint(cast<Instruction>(User));
4966         Value *Ex = Builder.CreateExtractElement(Vec, Lane);
4967         Ex = extend(ScalarRoot, Ex, Scalar->getType());
4968         CSEBlocks.insert(cast<Instruction>(User)->getParent());
4969         User->replaceUsesOfWith(Scalar, Ex);
4970       }
4971     } else {
4972       Builder.SetInsertPoint(&F->getEntryBlock().front());
4973       Value *Ex = Builder.CreateExtractElement(Vec, Lane);
4974       Ex = extend(ScalarRoot, Ex, Scalar->getType());
4975       CSEBlocks.insert(&F->getEntryBlock());
4976       User->replaceUsesOfWith(Scalar, Ex);
4977     }
4978 
4979     LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
4980   }
4981 
4982   // For each vectorized value:
4983   for (auto &TEPtr : VectorizableTree) {
4984     TreeEntry *Entry = TEPtr.get();
4985 
4986     // No need to handle users of gathered values.
4987     if (Entry->State == TreeEntry::NeedToGather)
4988       continue;
4989 
4990     assert(Entry->VectorizedValue && "Can't find vectorizable value");
4991 
4992     // For each lane:
4993     for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
4994       Value *Scalar = Entry->Scalars[Lane];
4995 
4996 #ifndef NDEBUG
4997       Type *Ty = Scalar->getType();
4998       if (!Ty->isVoidTy()) {
4999         for (User *U : Scalar->users()) {
5000           LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
5001 
5002           // It is legal to delete users in the ignorelist.
5003           assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) &&
5004                  "Deleting out-of-tree value");
5005         }
5006       }
5007 #endif
5008       LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
5009       eraseInstruction(cast<Instruction>(Scalar));
5010     }
5011   }
5012 
5013   Builder.ClearInsertionPoint();
5014   InstrElementSize.clear();
5015 
5016   return VectorizableTree[0]->VectorizedValue;
5017 }
5018 
5019 void BoUpSLP::optimizeGatherSequence() {
5020   LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
5021                     << " gather sequences instructions.\n");
5022   // LICM InsertElementInst sequences.
5023   for (Instruction *I : GatherSeq) {
5024     if (isDeleted(I))
5025       continue;
5026 
5027     // Check if this block is inside a loop.
5028     Loop *L = LI->getLoopFor(I->getParent());
5029     if (!L)
5030       continue;
5031 
5032     // Check if it has a preheader.
5033     BasicBlock *PreHeader = L->getLoopPreheader();
5034     if (!PreHeader)
5035       continue;
5036 
5037     // If the vector or the element that we insert into it are
5038     // instructions that are defined in this basic block then we can't
5039     // hoist this instruction.
5040     auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
5041     auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
5042     if (Op0 && L->contains(Op0))
5043       continue;
5044     if (Op1 && L->contains(Op1))
5045       continue;
5046 
5047     // We can hoist this instruction. Move it to the pre-header.
5048     I->moveBefore(PreHeader->getTerminator());
5049   }
5050 
5051   // Make a list of all reachable blocks in our CSE queue.
5052   SmallVector<const DomTreeNode *, 8> CSEWorkList;
5053   CSEWorkList.reserve(CSEBlocks.size());
5054   for (BasicBlock *BB : CSEBlocks)
5055     if (DomTreeNode *N = DT->getNode(BB)) {
5056       assert(DT->isReachableFromEntry(N));
5057       CSEWorkList.push_back(N);
5058     }
5059 
5060   // Sort blocks by domination. This ensures we visit a block after all blocks
5061   // dominating it are visited.
5062   llvm::stable_sort(CSEWorkList,
5063                     [this](const DomTreeNode *A, const DomTreeNode *B) {
5064                       return DT->properlyDominates(A, B);
5065                     });
5066 
5067   // Perform O(N^2) search over the gather sequences and merge identical
5068   // instructions. TODO: We can further optimize this scan if we split the
5069   // instructions into different buckets based on the insert lane.
5070   SmallVector<Instruction *, 16> Visited;
5071   for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
5072     assert(*I &&
5073            (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
5074            "Worklist not sorted properly!");
5075     BasicBlock *BB = (*I)->getBlock();
5076     // For all instructions in blocks containing gather sequences:
5077     for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
5078       Instruction *In = &*it++;
5079       if (isDeleted(In))
5080         continue;
5081       if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
5082         continue;
5083 
5084       // Check if we can replace this instruction with any of the
5085       // visited instructions.
5086       for (Instruction *v : Visited) {
5087         if (In->isIdenticalTo(v) &&
5088             DT->dominates(v->getParent(), In->getParent())) {
5089           In->replaceAllUsesWith(v);
5090           eraseInstruction(In);
5091           In = nullptr;
5092           break;
5093         }
5094       }
5095       if (In) {
5096         assert(!is_contained(Visited, In));
5097         Visited.push_back(In);
5098       }
5099     }
5100   }
5101   CSEBlocks.clear();
5102   GatherSeq.clear();
5103 }
5104 
5105 // Groups the instructions to a bundle (which is then a single scheduling entity)
5106 // and schedules instructions until the bundle gets ready.
5107 Optional<BoUpSLP::ScheduleData *>
5108 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
5109                                             const InstructionsState &S) {
5110   if (isa<PHINode>(S.OpValue))
5111     return nullptr;
5112 
5113   // Initialize the instruction bundle.
5114   Instruction *OldScheduleEnd = ScheduleEnd;
5115   ScheduleData *PrevInBundle = nullptr;
5116   ScheduleData *Bundle = nullptr;
5117   bool ReSchedule = false;
5118   LLVM_DEBUG(dbgs() << "SLP:  bundle: " << *S.OpValue << "\n");
5119 
5120   // Make sure that the scheduling region contains all
5121   // instructions of the bundle.
5122   for (Value *V : VL) {
5123     if (!extendSchedulingRegion(V, S))
5124       return None;
5125   }
5126 
5127   for (Value *V : VL) {
5128     ScheduleData *BundleMember = getScheduleData(V);
5129     assert(BundleMember &&
5130            "no ScheduleData for bundle member (maybe not in same basic block)");
5131     if (BundleMember->IsScheduled) {
5132       // A bundle member was scheduled as single instruction before and now
5133       // needs to be scheduled as part of the bundle. We just get rid of the
5134       // existing schedule.
5135       LLVM_DEBUG(dbgs() << "SLP:  reset schedule because " << *BundleMember
5136                         << " was already scheduled\n");
5137       ReSchedule = true;
5138     }
5139     assert(BundleMember->isSchedulingEntity() &&
5140            "bundle member already part of other bundle");
5141     if (PrevInBundle) {
5142       PrevInBundle->NextInBundle = BundleMember;
5143     } else {
5144       Bundle = BundleMember;
5145     }
5146     BundleMember->UnscheduledDepsInBundle = 0;
5147     Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;
5148 
5149     // Group the instructions to a bundle.
5150     BundleMember->FirstInBundle = Bundle;
5151     PrevInBundle = BundleMember;
5152   }
5153   if (ScheduleEnd != OldScheduleEnd) {
5154     // The scheduling region got new instructions at the lower end (or it is a
5155     // new region for the first bundle). This makes it necessary to
5156     // recalculate all dependencies.
5157     // It is seldom that this needs to be done a second time after adding the
5158     // initial bundle to the region.
5159     for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
5160       doForAllOpcodes(I, [](ScheduleData *SD) {
5161         SD->clearDependencies();
5162       });
5163     }
5164     ReSchedule = true;
5165   }
5166   if (ReSchedule) {
5167     resetSchedule();
5168     initialFillReadyList(ReadyInsts);
5169   }
5170   assert(Bundle && "Failed to find schedule bundle");
5171 
5172   LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
5173                     << BB->getName() << "\n");
5174 
5175   calculateDependencies(Bundle, true, SLP);
5176 
5177   // Now try to schedule the new bundle. As soon as the bundle is "ready" it
5178   // means that there are no cyclic dependencies and we can schedule it.
5179   // Note that's important that we don't "schedule" the bundle yet (see
5180   // cancelScheduling).
5181   while (!Bundle->isReady() && !ReadyInsts.empty()) {
5182 
5183     ScheduleData *pickedSD = ReadyInsts.back();
5184     ReadyInsts.pop_back();
5185 
5186     if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) {
5187       schedule(pickedSD, ReadyInsts);
5188     }
5189   }
5190   if (!Bundle->isReady()) {
5191     cancelScheduling(VL, S.OpValue);
5192     return None;
5193   }
5194   return Bundle;
5195 }
5196 
5197 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
5198                                                 Value *OpValue) {
5199   if (isa<PHINode>(OpValue))
5200     return;
5201 
5202   ScheduleData *Bundle = getScheduleData(OpValue);
5203   LLVM_DEBUG(dbgs() << "SLP:  cancel scheduling of " << *Bundle << "\n");
5204   assert(!Bundle->IsScheduled &&
5205          "Can't cancel bundle which is already scheduled");
5206   assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
5207          "tried to unbundle something which is not a bundle");
5208 
5209   // Un-bundle: make single instructions out of the bundle.
5210   ScheduleData *BundleMember = Bundle;
5211   while (BundleMember) {
5212     assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
5213     BundleMember->FirstInBundle = BundleMember;
5214     ScheduleData *Next = BundleMember->NextInBundle;
5215     BundleMember->NextInBundle = nullptr;
5216     BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
5217     if (BundleMember->UnscheduledDepsInBundle == 0) {
5218       ReadyInsts.insert(BundleMember);
5219     }
5220     BundleMember = Next;
5221   }
5222 }
5223 
5224 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
5225   // Allocate a new ScheduleData for the instruction.
5226   if (ChunkPos >= ChunkSize) {
5227     ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize));
5228     ChunkPos = 0;
5229   }
5230   return &(ScheduleDataChunks.back()[ChunkPos++]);
5231 }
5232 
5233 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V,
5234                                                       const InstructionsState &S) {
5235   if (getScheduleData(V, isOneOf(S, V)))
5236     return true;
5237   Instruction *I = dyn_cast<Instruction>(V);
5238   assert(I && "bundle member must be an instruction");
5239   assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
5240   auto &&CheckSheduleForI = [this, &S](Instruction *I) -> bool {
5241     ScheduleData *ISD = getScheduleData(I);
5242     if (!ISD)
5243       return false;
5244     assert(isInSchedulingRegion(ISD) &&
5245            "ScheduleData not in scheduling region");
5246     ScheduleData *SD = allocateScheduleDataChunks();
5247     SD->Inst = I;
5248     SD->init(SchedulingRegionID, S.OpValue);
5249     ExtraScheduleDataMap[I][S.OpValue] = SD;
5250     return true;
5251   };
5252   if (CheckSheduleForI(I))
5253     return true;
5254   if (!ScheduleStart) {
5255     // It's the first instruction in the new region.
5256     initScheduleData(I, I->getNextNode(), nullptr, nullptr);
5257     ScheduleStart = I;
5258     ScheduleEnd = I->getNextNode();
5259     if (isOneOf(S, I) != I)
5260       CheckSheduleForI(I);
5261     assert(ScheduleEnd && "tried to vectorize a terminator?");
5262     LLVM_DEBUG(dbgs() << "SLP:  initialize schedule region to " << *I << "\n");
5263     return true;
5264   }
5265   // Search up and down at the same time, because we don't know if the new
5266   // instruction is above or below the existing scheduling region.
5267   BasicBlock::reverse_iterator UpIter =
5268       ++ScheduleStart->getIterator().getReverse();
5269   BasicBlock::reverse_iterator UpperEnd = BB->rend();
5270   BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
5271   BasicBlock::iterator LowerEnd = BB->end();
5272   while (true) {
5273     if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
5274       LLVM_DEBUG(dbgs() << "SLP:  exceeded schedule region size limit\n");
5275       return false;
5276     }
5277 
5278     if (UpIter != UpperEnd) {
5279       if (&*UpIter == I) {
5280         initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
5281         ScheduleStart = I;
5282         if (isOneOf(S, I) != I)
5283           CheckSheduleForI(I);
5284         LLVM_DEBUG(dbgs() << "SLP:  extend schedule region start to " << *I
5285                           << "\n");
5286         return true;
5287       }
5288       ++UpIter;
5289     }
5290     if (DownIter != LowerEnd) {
5291       if (&*DownIter == I) {
5292         initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
5293                          nullptr);
5294         ScheduleEnd = I->getNextNode();
5295         if (isOneOf(S, I) != I)
5296           CheckSheduleForI(I);
5297         assert(ScheduleEnd && "tried to vectorize a terminator?");
5298         LLVM_DEBUG(dbgs() << "SLP:  extend schedule region end to " << *I
5299                           << "\n");
5300         return true;
5301       }
5302       ++DownIter;
5303     }
5304     assert((UpIter != UpperEnd || DownIter != LowerEnd) &&
5305            "instruction not found in block");
5306   }
5307   return true;
5308 }
5309 
5310 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
5311                                                 Instruction *ToI,
5312                                                 ScheduleData *PrevLoadStore,
5313                                                 ScheduleData *NextLoadStore) {
5314   ScheduleData *CurrentLoadStore = PrevLoadStore;
5315   for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
5316     ScheduleData *SD = ScheduleDataMap[I];
5317     if (!SD) {
5318       SD = allocateScheduleDataChunks();
5319       ScheduleDataMap[I] = SD;
5320       SD->Inst = I;
5321     }
5322     assert(!isInSchedulingRegion(SD) &&
5323            "new ScheduleData already in scheduling region");
5324     SD->init(SchedulingRegionID, I);
5325 
5326     if (I->mayReadOrWriteMemory() &&
5327         (!isa<IntrinsicInst>(I) ||
5328          (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect &&
5329           cast<IntrinsicInst>(I)->getIntrinsicID() !=
5330               Intrinsic::pseudoprobe))) {
5331       // Update the linked list of memory accessing instructions.
5332       if (CurrentLoadStore) {
5333         CurrentLoadStore->NextLoadStore = SD;
5334       } else {
5335         FirstLoadStoreInRegion = SD;
5336       }
5337       CurrentLoadStore = SD;
5338     }
5339   }
5340   if (NextLoadStore) {
5341     if (CurrentLoadStore)
5342       CurrentLoadStore->NextLoadStore = NextLoadStore;
5343   } else {
5344     LastLoadStoreInRegion = CurrentLoadStore;
5345   }
5346 }
5347 
5348 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
5349                                                      bool InsertInReadyList,
5350                                                      BoUpSLP *SLP) {
5351   assert(SD->isSchedulingEntity());
5352 
5353   SmallVector<ScheduleData *, 10> WorkList;
5354   WorkList.push_back(SD);
5355 
5356   while (!WorkList.empty()) {
5357     ScheduleData *SD = WorkList.back();
5358     WorkList.pop_back();
5359 
5360     ScheduleData *BundleMember = SD;
5361     while (BundleMember) {
5362       assert(isInSchedulingRegion(BundleMember));
5363       if (!BundleMember->hasValidDependencies()) {
5364 
5365         LLVM_DEBUG(dbgs() << "SLP:       update deps of " << *BundleMember
5366                           << "\n");
5367         BundleMember->Dependencies = 0;
5368         BundleMember->resetUnscheduledDeps();
5369 
5370         // Handle def-use chain dependencies.
5371         if (BundleMember->OpValue != BundleMember->Inst) {
5372           ScheduleData *UseSD = getScheduleData(BundleMember->Inst);
5373           if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
5374             BundleMember->Dependencies++;
5375             ScheduleData *DestBundle = UseSD->FirstInBundle;
5376             if (!DestBundle->IsScheduled)
5377               BundleMember->incrementUnscheduledDeps(1);
5378             if (!DestBundle->hasValidDependencies())
5379               WorkList.push_back(DestBundle);
5380           }
5381         } else {
5382           for (User *U : BundleMember->Inst->users()) {
5383             if (isa<Instruction>(U)) {
5384               ScheduleData *UseSD = getScheduleData(U);
5385               if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
5386                 BundleMember->Dependencies++;
5387                 ScheduleData *DestBundle = UseSD->FirstInBundle;
5388                 if (!DestBundle->IsScheduled)
5389                   BundleMember->incrementUnscheduledDeps(1);
5390                 if (!DestBundle->hasValidDependencies())
5391                   WorkList.push_back(DestBundle);
5392               }
5393             } else {
5394               // I'm not sure if this can ever happen. But we need to be safe.
5395               // This lets the instruction/bundle never be scheduled and
5396               // eventually disable vectorization.
5397               BundleMember->Dependencies++;
5398               BundleMember->incrementUnscheduledDeps(1);
5399             }
5400           }
5401         }
5402 
5403         // Handle the memory dependencies.
5404         ScheduleData *DepDest = BundleMember->NextLoadStore;
5405         if (DepDest) {
5406           Instruction *SrcInst = BundleMember->Inst;
5407           MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
5408           bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
5409           unsigned numAliased = 0;
5410           unsigned DistToSrc = 1;
5411 
5412           while (DepDest) {
5413             assert(isInSchedulingRegion(DepDest));
5414 
5415             // We have two limits to reduce the complexity:
5416             // 1) AliasedCheckLimit: It's a small limit to reduce calls to
5417             //    SLP->isAliased (which is the expensive part in this loop).
5418             // 2) MaxMemDepDistance: It's for very large blocks and it aborts
5419             //    the whole loop (even if the loop is fast, it's quadratic).
5420             //    It's important for the loop break condition (see below) to
5421             //    check this limit even between two read-only instructions.
5422             if (DistToSrc >= MaxMemDepDistance ||
5423                     ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
5424                      (numAliased >= AliasedCheckLimit ||
5425                       SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
5426 
5427               // We increment the counter only if the locations are aliased
5428               // (instead of counting all alias checks). This gives a better
5429               // balance between reduced runtime and accurate dependencies.
5430               numAliased++;
5431 
5432               DepDest->MemoryDependencies.push_back(BundleMember);
5433               BundleMember->Dependencies++;
5434               ScheduleData *DestBundle = DepDest->FirstInBundle;
5435               if (!DestBundle->IsScheduled) {
5436                 BundleMember->incrementUnscheduledDeps(1);
5437               }
5438               if (!DestBundle->hasValidDependencies()) {
5439                 WorkList.push_back(DestBundle);
5440               }
5441             }
5442             DepDest = DepDest->NextLoadStore;
5443 
5444             // Example, explaining the loop break condition: Let's assume our
5445             // starting instruction is i0 and MaxMemDepDistance = 3.
5446             //
5447             //                      +--------v--v--v
5448             //             i0,i1,i2,i3,i4,i5,i6,i7,i8
5449             //             +--------^--^--^
5450             //
5451             // MaxMemDepDistance let us stop alias-checking at i3 and we add
5452             // dependencies from i0 to i3,i4,.. (even if they are not aliased).
5453             // Previously we already added dependencies from i3 to i6,i7,i8
5454             // (because of MaxMemDepDistance). As we added a dependency from
5455             // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
5456             // and we can abort this loop at i6.
5457             if (DistToSrc >= 2 * MaxMemDepDistance)
5458               break;
5459             DistToSrc++;
5460           }
5461         }
5462       }
5463       BundleMember = BundleMember->NextInBundle;
5464     }
5465     if (InsertInReadyList && SD->isReady()) {
5466       ReadyInsts.push_back(SD);
5467       LLVM_DEBUG(dbgs() << "SLP:     gets ready on update: " << *SD->Inst
5468                         << "\n");
5469     }
5470   }
5471 }
5472 
5473 void BoUpSLP::BlockScheduling::resetSchedule() {
5474   assert(ScheduleStart &&
5475          "tried to reset schedule on block which has not been scheduled");
5476   for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
5477     doForAllOpcodes(I, [&](ScheduleData *SD) {
5478       assert(isInSchedulingRegion(SD) &&
5479              "ScheduleData not in scheduling region");
5480       SD->IsScheduled = false;
5481       SD->resetUnscheduledDeps();
5482     });
5483   }
5484   ReadyInsts.clear();
5485 }
5486 
5487 void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
5488   if (!BS->ScheduleStart)
5489     return;
5490 
5491   LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
5492 
5493   BS->resetSchedule();
5494 
5495   // For the real scheduling we use a more sophisticated ready-list: it is
5496   // sorted by the original instruction location. This lets the final schedule
5497   // be as  close as possible to the original instruction order.
5498   struct ScheduleDataCompare {
5499     bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
5500       return SD2->SchedulingPriority < SD1->SchedulingPriority;
5501     }
5502   };
5503   std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
5504 
5505   // Ensure that all dependency data is updated and fill the ready-list with
5506   // initial instructions.
5507   int Idx = 0;
5508   int NumToSchedule = 0;
5509   for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
5510        I = I->getNextNode()) {
5511     BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) {
5512       assert(SD->isPartOfBundle() ==
5513                  (getTreeEntry(SD->Inst) != nullptr) &&
5514              "scheduler and vectorizer bundle mismatch");
5515       SD->FirstInBundle->SchedulingPriority = Idx++;
5516       if (SD->isSchedulingEntity()) {
5517         BS->calculateDependencies(SD, false, this);
5518         NumToSchedule++;
5519       }
5520     });
5521   }
5522   BS->initialFillReadyList(ReadyInsts);
5523 
5524   Instruction *LastScheduledInst = BS->ScheduleEnd;
5525 
5526   // Do the "real" scheduling.
5527   while (!ReadyInsts.empty()) {
5528     ScheduleData *picked = *ReadyInsts.begin();
5529     ReadyInsts.erase(ReadyInsts.begin());
5530 
5531     // Move the scheduled instruction(s) to their dedicated places, if not
5532     // there yet.
5533     ScheduleData *BundleMember = picked;
5534     while (BundleMember) {
5535       Instruction *pickedInst = BundleMember->Inst;
5536       if (LastScheduledInst->getNextNode() != pickedInst) {
5537         BS->BB->getInstList().remove(pickedInst);
5538         BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
5539                                      pickedInst);
5540       }
5541       LastScheduledInst = pickedInst;
5542       BundleMember = BundleMember->NextInBundle;
5543     }
5544 
5545     BS->schedule(picked, ReadyInsts);
5546     NumToSchedule--;
5547   }
5548   assert(NumToSchedule == 0 && "could not schedule all instructions");
5549 
5550   // Avoid duplicate scheduling of the block.
5551   BS->ScheduleStart = nullptr;
5552 }
5553 
5554 unsigned BoUpSLP::getVectorElementSize(Value *V) {
5555   // If V is a store, just return the width of the stored value (or value
5556   // truncated just before storing) without traversing the expression tree.
5557   // This is the common case.
5558   if (auto *Store = dyn_cast<StoreInst>(V)) {
5559     if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand()))
5560       return DL->getTypeSizeInBits(Trunc->getSrcTy());
5561     else
5562       return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
5563   }
5564 
5565   auto E = InstrElementSize.find(V);
5566   if (E != InstrElementSize.end())
5567     return E->second;
5568 
5569   // If V is not a store, we can traverse the expression tree to find loads
5570   // that feed it. The type of the loaded value may indicate a more suitable
5571   // width than V's type. We want to base the vector element size on the width
5572   // of memory operations where possible.
5573   SmallVector<Instruction *, 16> Worklist;
5574   SmallPtrSet<Instruction *, 16> Visited;
5575   if (auto *I = dyn_cast<Instruction>(V)) {
5576     Worklist.push_back(I);
5577     Visited.insert(I);
5578   }
5579 
5580   // Traverse the expression tree in bottom-up order looking for loads. If we
5581   // encounter an instruction we don't yet handle, we give up.
5582   auto MaxWidth = 0u;
5583   auto FoundUnknownInst = false;
5584   while (!Worklist.empty() && !FoundUnknownInst) {
5585     auto *I = Worklist.pop_back_val();
5586 
5587     // We should only be looking at scalar instructions here. If the current
5588     // instruction has a vector type, give up.
5589     auto *Ty = I->getType();
5590     if (isa<VectorType>(Ty))
5591       FoundUnknownInst = true;
5592 
5593     // If the current instruction is a load, update MaxWidth to reflect the
5594     // width of the loaded value.
5595     else if (isa<LoadInst>(I))
5596       MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty));
5597 
5598     // Otherwise, we need to visit the operands of the instruction. We only
5599     // handle the interesting cases from buildTree here. If an operand is an
5600     // instruction we haven't yet visited, we add it to the worklist.
5601     else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
5602              isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) {
5603       for (Use &U : I->operands())
5604         if (auto *J = dyn_cast<Instruction>(U.get()))
5605           if (Visited.insert(J).second)
5606             Worklist.push_back(J);
5607     }
5608 
5609     // If we don't yet handle the instruction, give up.
5610     else
5611       FoundUnknownInst = true;
5612   }
5613 
5614   int Width = MaxWidth;
5615   // If we didn't encounter a memory access in the expression tree, or if we
5616   // gave up for some reason, just return the width of V. Otherwise, return the
5617   // maximum width we found.
5618   if (!MaxWidth || FoundUnknownInst)
5619     Width = DL->getTypeSizeInBits(V->getType());
5620 
5621   for (Instruction *I : Visited)
5622     InstrElementSize[I] = Width;
5623 
5624   return Width;
5625 }
5626 
5627 // Determine if a value V in a vectorizable expression Expr can be demoted to a
5628 // smaller type with a truncation. We collect the values that will be demoted
5629 // in ToDemote and additional roots that require investigating in Roots.
5630 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
5631                                   SmallVectorImpl<Value *> &ToDemote,
5632                                   SmallVectorImpl<Value *> &Roots) {
5633   // We can always demote constants.
5634   if (isa<Constant>(V)) {
5635     ToDemote.push_back(V);
5636     return true;
5637   }
5638 
5639   // If the value is not an instruction in the expression with only one use, it
5640   // cannot be demoted.
5641   auto *I = dyn_cast<Instruction>(V);
5642   if (!I || !I->hasOneUse() || !Expr.count(I))
5643     return false;
5644 
5645   switch (I->getOpcode()) {
5646 
5647   // We can always demote truncations and extensions. Since truncations can
5648   // seed additional demotion, we save the truncated value.
5649   case Instruction::Trunc:
5650     Roots.push_back(I->getOperand(0));
5651     break;
5652   case Instruction::ZExt:
5653   case Instruction::SExt:
5654     break;
5655 
5656   // We can demote certain binary operations if we can demote both of their
5657   // operands.
5658   case Instruction::Add:
5659   case Instruction::Sub:
5660   case Instruction::Mul:
5661   case Instruction::And:
5662   case Instruction::Or:
5663   case Instruction::Xor:
5664     if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
5665         !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
5666       return false;
5667     break;
5668 
5669   // We can demote selects if we can demote their true and false values.
5670   case Instruction::Select: {
5671     SelectInst *SI = cast<SelectInst>(I);
5672     if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
5673         !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
5674       return false;
5675     break;
5676   }
5677 
5678   // We can demote phis if we can demote all their incoming operands. Note that
5679   // we don't need to worry about cycles since we ensure single use above.
5680   case Instruction::PHI: {
5681     PHINode *PN = cast<PHINode>(I);
5682     for (Value *IncValue : PN->incoming_values())
5683       if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
5684         return false;
5685     break;
5686   }
5687 
5688   // Otherwise, conservatively give up.
5689   default:
5690     return false;
5691   }
5692 
5693   // Record the value that we can demote.
5694   ToDemote.push_back(V);
5695   return true;
5696 }
5697 
5698 void BoUpSLP::computeMinimumValueSizes() {
5699   // If there are no external uses, the expression tree must be rooted by a
5700   // store. We can't demote in-memory values, so there is nothing to do here.
5701   if (ExternalUses.empty())
5702     return;
5703 
5704   // We only attempt to truncate integer expressions.
5705   auto &TreeRoot = VectorizableTree[0]->Scalars;
5706   auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
5707   if (!TreeRootIT)
5708     return;
5709 
5710   // If the expression is not rooted by a store, these roots should have
5711   // external uses. We will rely on InstCombine to rewrite the expression in
5712   // the narrower type. However, InstCombine only rewrites single-use values.
5713   // This means that if a tree entry other than a root is used externally, it
5714   // must have multiple uses and InstCombine will not rewrite it. The code
5715   // below ensures that only the roots are used externally.
5716   SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
5717   for (auto &EU : ExternalUses)
5718     if (!Expr.erase(EU.Scalar))
5719       return;
5720   if (!Expr.empty())
5721     return;
5722 
5723   // Collect the scalar values of the vectorizable expression. We will use this
5724   // context to determine which values can be demoted. If we see a truncation,
5725   // we mark it as seeding another demotion.
5726   for (auto &EntryPtr : VectorizableTree)
5727     Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end());
5728 
5729   // Ensure the roots of the vectorizable tree don't form a cycle. They must
5730   // have a single external user that is not in the vectorizable tree.
5731   for (auto *Root : TreeRoot)
5732     if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
5733       return;
5734 
5735   // Conservatively determine if we can actually truncate the roots of the
5736   // expression. Collect the values that can be demoted in ToDemote and
5737   // additional roots that require investigating in Roots.
5738   SmallVector<Value *, 32> ToDemote;
5739   SmallVector<Value *, 4> Roots;
5740   for (auto *Root : TreeRoot)
5741     if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
5742       return;
5743 
5744   // The maximum bit width required to represent all the values that can be
5745   // demoted without loss of precision. It would be safe to truncate the roots
5746   // of the expression to this width.
5747   auto MaxBitWidth = 8u;
5748 
5749   // We first check if all the bits of the roots are demanded. If they're not,
5750   // we can truncate the roots to this narrower type.
5751   for (auto *Root : TreeRoot) {
5752     auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
5753     MaxBitWidth = std::max<unsigned>(
5754         Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
5755   }
5756 
5757   // True if the roots can be zero-extended back to their original type, rather
5758   // than sign-extended. We know that if the leading bits are not demanded, we
5759   // can safely zero-extend. So we initialize IsKnownPositive to True.
5760   bool IsKnownPositive = true;
5761 
5762   // If all the bits of the roots are demanded, we can try a little harder to
5763   // compute a narrower type. This can happen, for example, if the roots are
5764   // getelementptr indices. InstCombine promotes these indices to the pointer
5765   // width. Thus, all their bits are technically demanded even though the
5766   // address computation might be vectorized in a smaller type.
5767   //
5768   // We start by looking at each entry that can be demoted. We compute the
5769   // maximum bit width required to store the scalar by using ValueTracking to
5770   // compute the number of high-order bits we can truncate.
5771   if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
5772       llvm::all_of(TreeRoot, [](Value *R) {
5773         assert(R->hasOneUse() && "Root should have only one use!");
5774         return isa<GetElementPtrInst>(R->user_back());
5775       })) {
5776     MaxBitWidth = 8u;
5777 
5778     // Determine if the sign bit of all the roots is known to be zero. If not,
5779     // IsKnownPositive is set to False.
5780     IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
5781       KnownBits Known = computeKnownBits(R, *DL);
5782       return Known.isNonNegative();
5783     });
5784 
5785     // Determine the maximum number of bits required to store the scalar
5786     // values.
5787     for (auto *Scalar : ToDemote) {
5788       auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
5789       auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
5790       MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
5791     }
5792 
5793     // If we can't prove that the sign bit is zero, we must add one to the
5794     // maximum bit width to account for the unknown sign bit. This preserves
5795     // the existing sign bit so we can safely sign-extend the root back to the
5796     // original type. Otherwise, if we know the sign bit is zero, we will
5797     // zero-extend the root instead.
5798     //
5799     // FIXME: This is somewhat suboptimal, as there will be cases where adding
5800     //        one to the maximum bit width will yield a larger-than-necessary
5801     //        type. In general, we need to add an extra bit only if we can't
5802     //        prove that the upper bit of the original type is equal to the
5803     //        upper bit of the proposed smaller type. If these two bits are the
5804     //        same (either zero or one) we know that sign-extending from the
5805     //        smaller type will result in the same value. Here, since we can't
5806     //        yet prove this, we are just making the proposed smaller type
5807     //        larger to ensure correctness.
5808     if (!IsKnownPositive)
5809       ++MaxBitWidth;
5810   }
5811 
5812   // Round MaxBitWidth up to the next power-of-two.
5813   if (!isPowerOf2_64(MaxBitWidth))
5814     MaxBitWidth = NextPowerOf2(MaxBitWidth);
5815 
5816   // If the maximum bit width we compute is less than the with of the roots'
5817   // type, we can proceed with the narrowing. Otherwise, do nothing.
5818   if (MaxBitWidth >= TreeRootIT->getBitWidth())
5819     return;
5820 
5821   // If we can truncate the root, we must collect additional values that might
5822   // be demoted as a result. That is, those seeded by truncations we will
5823   // modify.
5824   while (!Roots.empty())
5825     collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
5826 
5827   // Finally, map the values we can demote to the maximum bit with we computed.
5828   for (auto *Scalar : ToDemote)
5829     MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
5830 }
5831 
5832 namespace {
5833 
5834 /// The SLPVectorizer Pass.
5835 struct SLPVectorizer : public FunctionPass {
5836   SLPVectorizerPass Impl;
5837 
5838   /// Pass identification, replacement for typeid
5839   static char ID;
5840 
5841   explicit SLPVectorizer() : FunctionPass(ID) {
5842     initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
5843   }
5844 
5845   bool doInitialization(Module &M) override {
5846     return false;
5847   }
5848 
5849   bool runOnFunction(Function &F) override {
5850     if (skipFunction(F))
5851       return false;
5852 
5853     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
5854     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
5855     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
5856     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
5857     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
5858     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
5859     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
5860     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
5861     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
5862     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
5863 
5864     return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
5865   }
5866 
5867   void getAnalysisUsage(AnalysisUsage &AU) const override {
5868     FunctionPass::getAnalysisUsage(AU);
5869     AU.addRequired<AssumptionCacheTracker>();
5870     AU.addRequired<ScalarEvolutionWrapperPass>();
5871     AU.addRequired<AAResultsWrapperPass>();
5872     AU.addRequired<TargetTransformInfoWrapperPass>();
5873     AU.addRequired<LoopInfoWrapperPass>();
5874     AU.addRequired<DominatorTreeWrapperPass>();
5875     AU.addRequired<DemandedBitsWrapperPass>();
5876     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
5877     AU.addRequired<InjectTLIMappingsLegacy>();
5878     AU.addPreserved<LoopInfoWrapperPass>();
5879     AU.addPreserved<DominatorTreeWrapperPass>();
5880     AU.addPreserved<AAResultsWrapperPass>();
5881     AU.addPreserved<GlobalsAAWrapperPass>();
5882     AU.setPreservesCFG();
5883   }
5884 };
5885 
5886 } // end anonymous namespace
5887 
5888 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
5889   auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
5890   auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
5891   auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
5892   auto *AA = &AM.getResult<AAManager>(F);
5893   auto *LI = &AM.getResult<LoopAnalysis>(F);
5894   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
5895   auto *AC = &AM.getResult<AssumptionAnalysis>(F);
5896   auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
5897   auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
5898 
5899   bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
5900   if (!Changed)
5901     return PreservedAnalyses::all();
5902 
5903   PreservedAnalyses PA;
5904   PA.preserveSet<CFGAnalyses>();
5905   PA.preserve<AAManager>();
5906   PA.preserve<GlobalsAA>();
5907   return PA;
5908 }
5909 
5910 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
5911                                 TargetTransformInfo *TTI_,
5912                                 TargetLibraryInfo *TLI_, AAResults *AA_,
5913                                 LoopInfo *LI_, DominatorTree *DT_,
5914                                 AssumptionCache *AC_, DemandedBits *DB_,
5915                                 OptimizationRemarkEmitter *ORE_) {
5916   if (!RunSLPVectorization)
5917     return false;
5918   SE = SE_;
5919   TTI = TTI_;
5920   TLI = TLI_;
5921   AA = AA_;
5922   LI = LI_;
5923   DT = DT_;
5924   AC = AC_;
5925   DB = DB_;
5926   DL = &F.getParent()->getDataLayout();
5927 
5928   Stores.clear();
5929   GEPs.clear();
5930   bool Changed = false;
5931 
5932   // If the target claims to have no vector registers don't attempt
5933   // vectorization.
5934   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)))
5935     return false;
5936 
5937   // Don't vectorize when the attribute NoImplicitFloat is used.
5938   if (F.hasFnAttribute(Attribute::NoImplicitFloat))
5939     return false;
5940 
5941   LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
5942 
5943   // Use the bottom up slp vectorizer to construct chains that start with
5944   // store instructions.
5945   BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);
5946 
5947   // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
5948   // delete instructions.
5949 
5950   // Scan the blocks in the function in post order.
5951   for (auto BB : post_order(&F.getEntryBlock())) {
5952     collectSeedInstructions(BB);
5953 
5954     // Vectorize trees that end at stores.
5955     if (!Stores.empty()) {
5956       LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
5957                         << " underlying objects.\n");
5958       Changed |= vectorizeStoreChains(R);
5959     }
5960 
5961     // Vectorize trees that end at reductions.
5962     Changed |= vectorizeChainsInBlock(BB, R);
5963 
5964     // Vectorize the index computations of getelementptr instructions. This
5965     // is primarily intended to catch gather-like idioms ending at
5966     // non-consecutive loads.
5967     if (!GEPs.empty()) {
5968       LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
5969                         << " underlying objects.\n");
5970       Changed |= vectorizeGEPIndices(BB, R);
5971     }
5972   }
5973 
5974   if (Changed) {
5975     R.optimizeGatherSequence();
5976     LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
5977   }
5978   return Changed;
5979 }
5980 
5981 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
5982                                             unsigned Idx) {
5983   LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size()
5984                     << "\n");
5985   const unsigned Sz = R.getVectorElementSize(Chain[0]);
5986   const unsigned MinVF = R.getMinVecRegSize() / Sz;
5987   unsigned VF = Chain.size();
5988 
5989   if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF)
5990     return false;
5991 
5992   LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx
5993                     << "\n");
5994 
5995   R.buildTree(Chain);
5996   Optional<ArrayRef<unsigned>> Order = R.bestOrder();
5997   // TODO: Handle orders of size less than number of elements in the vector.
5998   if (Order && Order->size() == Chain.size()) {
5999     // TODO: reorder tree nodes without tree rebuilding.
6000     SmallVector<Value *, 4> ReorderedOps(Chain.rbegin(), Chain.rend());
6001     llvm::transform(*Order, ReorderedOps.begin(),
6002                     [Chain](const unsigned Idx) { return Chain[Idx]; });
6003     R.buildTree(ReorderedOps);
6004   }
6005   if (R.isTreeTinyAndNotFullyVectorizable())
6006     return false;
6007   if (R.isLoadCombineCandidate())
6008     return false;
6009 
6010   R.computeMinimumValueSizes();
6011 
6012   int Cost = R.getTreeCost();
6013 
6014   LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF =" << VF << "\n");
6015   if (Cost < -SLPCostThreshold) {
6016     LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n");
6017 
6018     using namespace ore;
6019 
6020     R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
6021                                         cast<StoreInst>(Chain[0]))
6022                      << "Stores SLP vectorized with cost " << NV("Cost", Cost)
6023                      << " and with tree size "
6024                      << NV("TreeSize", R.getTreeSize()));
6025 
6026     R.vectorizeTree();
6027     return true;
6028   }
6029 
6030   return false;
6031 }
6032 
6033 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
6034                                         BoUpSLP &R) {
6035   // We may run into multiple chains that merge into a single chain. We mark the
6036   // stores that we vectorized so that we don't visit the same store twice.
6037   BoUpSLP::ValueSet VectorizedStores;
6038   bool Changed = false;
6039 
6040   int E = Stores.size();
6041   SmallBitVector Tails(E, false);
6042   SmallVector<int, 16> ConsecutiveChain(E, E + 1);
6043   int MaxIter = MaxStoreLookup.getValue();
6044   int IterCnt;
6045   auto &&FindConsecutiveAccess = [this, &Stores, &Tails, &IterCnt, MaxIter,
6046                                   &ConsecutiveChain](int K, int Idx) {
6047     if (IterCnt >= MaxIter)
6048       return true;
6049     ++IterCnt;
6050     if (!isConsecutiveAccess(Stores[K], Stores[Idx], *DL, *SE))
6051       return false;
6052 
6053     Tails.set(Idx);
6054     ConsecutiveChain[K] = Idx;
6055     return true;
6056   };
6057   // Do a quadratic search on all of the given stores in reverse order and find
6058   // all of the pairs of stores that follow each other.
6059   for (int Idx = E - 1; Idx >= 0; --Idx) {
6060     // If a store has multiple consecutive store candidates, search according
6061     // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ...
6062     // This is because usually pairing with immediate succeeding or preceding
6063     // candidate create the best chance to find slp vectorization opportunity.
6064     const int MaxLookDepth = std::max(E - Idx, Idx + 1);
6065     IterCnt = 0;
6066     for (int Offset = 1, F = MaxLookDepth; Offset < F; ++Offset)
6067       if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) ||
6068           (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx)))
6069         break;
6070   }
6071 
6072   // For stores that start but don't end a link in the chain:
6073   for (int Cnt = E; Cnt > 0; --Cnt) {
6074     int I = Cnt - 1;
6075     if (ConsecutiveChain[I] == E + 1 || Tails.test(I))
6076       continue;
6077     // We found a store instr that starts a chain. Now follow the chain and try
6078     // to vectorize it.
6079     BoUpSLP::ValueList Operands;
6080     // Collect the chain into a list.
6081     while (I != E + 1 && !VectorizedStores.count(Stores[I])) {
6082       Operands.push_back(Stores[I]);
6083       // Move to the next value in the chain.
6084       I = ConsecutiveChain[I];
6085     }
6086 
6087     // If a vector register can't hold 1 element, we are done.
6088     unsigned MaxVecRegSize = R.getMaxVecRegSize();
6089     unsigned EltSize = R.getVectorElementSize(Operands[0]);
6090     if (MaxVecRegSize % EltSize != 0)
6091       continue;
6092 
6093     unsigned MaxElts = MaxVecRegSize / EltSize;
6094     // FIXME: Is division-by-2 the correct step? Should we assert that the
6095     // register size is a power-of-2?
6096     unsigned StartIdx = 0;
6097     for (unsigned Size = llvm::PowerOf2Ceil(MaxElts); Size >= 2; Size /= 2) {
6098       for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) {
6099         ArrayRef<Value *> Slice = makeArrayRef(Operands).slice(Cnt, Size);
6100         if (!VectorizedStores.count(Slice.front()) &&
6101             !VectorizedStores.count(Slice.back()) &&
6102             vectorizeStoreChain(Slice, R, Cnt)) {
6103           // Mark the vectorized stores so that we don't vectorize them again.
6104           VectorizedStores.insert(Slice.begin(), Slice.end());
6105           Changed = true;
6106           // If we vectorized initial block, no need to try to vectorize it
6107           // again.
6108           if (Cnt == StartIdx)
6109             StartIdx += Size;
6110           Cnt += Size;
6111           continue;
6112         }
6113         ++Cnt;
6114       }
6115       // Check if the whole array was vectorized already - exit.
6116       if (StartIdx >= Operands.size())
6117         break;
6118     }
6119   }
6120 
6121   return Changed;
6122 }
6123 
6124 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
6125   // Initialize the collections. We will make a single pass over the block.
6126   Stores.clear();
6127   GEPs.clear();
6128 
6129   // Visit the store and getelementptr instructions in BB and organize them in
6130   // Stores and GEPs according to the underlying objects of their pointer
6131   // operands.
6132   for (Instruction &I : *BB) {
6133     // Ignore store instructions that are volatile or have a pointer operand
6134     // that doesn't point to a scalar type.
6135     if (auto *SI = dyn_cast<StoreInst>(&I)) {
6136       if (!SI->isSimple())
6137         continue;
6138       if (!isValidElementType(SI->getValueOperand()->getType()))
6139         continue;
6140       Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI);
6141     }
6142 
6143     // Ignore getelementptr instructions that have more than one index, a
6144     // constant index, or a pointer operand that doesn't point to a scalar
6145     // type.
6146     else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
6147       auto Idx = GEP->idx_begin()->get();
6148       if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
6149         continue;
6150       if (!isValidElementType(Idx->getType()))
6151         continue;
6152       if (GEP->getType()->isVectorTy())
6153         continue;
6154       GEPs[GEP->getPointerOperand()].push_back(GEP);
6155     }
6156   }
6157 }
6158 
6159 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
6160   if (!A || !B)
6161     return false;
6162   Value *VL[] = {A, B};
6163   return tryToVectorizeList(VL, R, /*AllowReorder=*/true);
6164 }
6165 
6166 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
6167                                            bool AllowReorder,
6168                                            ArrayRef<Value *> InsertUses) {
6169   if (VL.size() < 2)
6170     return false;
6171 
6172   LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = "
6173                     << VL.size() << ".\n");
6174 
6175   // Check that all of the parts are instructions of the same type,
6176   // we permit an alternate opcode via InstructionsState.
6177   InstructionsState S = getSameOpcode(VL);
6178   if (!S.getOpcode())
6179     return false;
6180 
6181   Instruction *I0 = cast<Instruction>(S.OpValue);
6182   // Make sure invalid types (including vector type) are rejected before
6183   // determining vectorization factor for scalar instructions.
6184   for (Value *V : VL) {
6185     Type *Ty = V->getType();
6186     if (!isValidElementType(Ty)) {
6187       // NOTE: the following will give user internal llvm type name, which may
6188       // not be useful.
6189       R.getORE()->emit([&]() {
6190         std::string type_str;
6191         llvm::raw_string_ostream rso(type_str);
6192         Ty->print(rso);
6193         return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0)
6194                << "Cannot SLP vectorize list: type "
6195                << rso.str() + " is unsupported by vectorizer";
6196       });
6197       return false;
6198     }
6199   }
6200 
6201   unsigned Sz = R.getVectorElementSize(I0);
6202   unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz);
6203   unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
6204   MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF);
6205   if (MaxVF < 2) {
6206     R.getORE()->emit([&]() {
6207       return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0)
6208              << "Cannot SLP vectorize list: vectorization factor "
6209              << "less than 2 is not supported";
6210     });
6211     return false;
6212   }
6213 
6214   bool Changed = false;
6215   bool CandidateFound = false;
6216   int MinCost = SLPCostThreshold;
6217 
6218   bool CompensateUseCost =
6219       !InsertUses.empty() && llvm::all_of(InsertUses, [](const Value *V) {
6220         return V && isa<InsertElementInst>(V);
6221       });
6222   assert((!CompensateUseCost || InsertUses.size() == VL.size()) &&
6223          "Each scalar expected to have an associated InsertElement user.");
6224 
6225   unsigned NextInst = 0, MaxInst = VL.size();
6226   for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) {
6227     // No actual vectorization should happen, if number of parts is the same as
6228     // provided vectorization factor (i.e. the scalar type is used for vector
6229     // code during codegen).
6230     auto *VecTy = FixedVectorType::get(VL[0]->getType(), VF);
6231     if (TTI->getNumberOfParts(VecTy) == VF)
6232       continue;
6233     for (unsigned I = NextInst; I < MaxInst; ++I) {
6234       unsigned OpsWidth = 0;
6235 
6236       if (I + VF > MaxInst)
6237         OpsWidth = MaxInst - I;
6238       else
6239         OpsWidth = VF;
6240 
6241       if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
6242         break;
6243 
6244       ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
6245       // Check that a previous iteration of this loop did not delete the Value.
6246       if (llvm::any_of(Ops, [&R](Value *V) {
6247             auto *I = dyn_cast<Instruction>(V);
6248             return I && R.isDeleted(I);
6249           }))
6250         continue;
6251 
6252       LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
6253                         << "\n");
6254 
6255       R.buildTree(Ops);
6256       Optional<ArrayRef<unsigned>> Order = R.bestOrder();
6257       // TODO: check if we can allow reordering for more cases.
6258       if (AllowReorder && Order) {
6259         // TODO: reorder tree nodes without tree rebuilding.
6260         // Conceptually, there is nothing actually preventing us from trying to
6261         // reorder a larger list. In fact, we do exactly this when vectorizing
6262         // reductions. However, at this point, we only expect to get here when
6263         // there are exactly two operations.
6264         assert(Ops.size() == 2);
6265         Value *ReorderedOps[] = {Ops[1], Ops[0]};
6266         R.buildTree(ReorderedOps, None);
6267       }
6268       if (R.isTreeTinyAndNotFullyVectorizable())
6269         continue;
6270 
6271       R.computeMinimumValueSizes();
6272       int Cost = R.getTreeCost();
6273       CandidateFound = true;
6274       if (CompensateUseCost) {
6275         // TODO: Use TTI's getScalarizationOverhead for sequence of inserts
6276         // rather than sum of single inserts as the latter may overestimate
6277         // cost. This work should imply improving cost estimation for extracts
6278         // that added in for external (for vectorization tree) users,i.e. that
6279         // part should also switch to same interface.
6280         // For example, the following case is projected code after SLP:
6281         //  %4 = extractelement <4 x i64> %3, i32 0
6282         //  %v0 = insertelement <4 x i64> undef, i64 %4, i32 0
6283         //  %5 = extractelement <4 x i64> %3, i32 1
6284         //  %v1 = insertelement <4 x i64> %v0, i64 %5, i32 1
6285         //  %6 = extractelement <4 x i64> %3, i32 2
6286         //  %v2 = insertelement <4 x i64> %v1, i64 %6, i32 2
6287         //  %7 = extractelement <4 x i64> %3, i32 3
6288         //  %v3 = insertelement <4 x i64> %v2, i64 %7, i32 3
6289         //
6290         // Extracts here added by SLP in order to feed users (the inserts) of
6291         // original scalars and contribute to "ExtractCost" at cost evaluation.
6292         // The inserts in turn form sequence to build an aggregate that
6293         // detected by findBuildAggregate routine.
6294         // SLP makes an assumption that such sequence will be optimized away
6295         // later (instcombine) so it tries to compensate ExctractCost with
6296         // cost of insert sequence.
6297         // Current per element cost calculation approach is not quite accurate
6298         // and tends to create bias toward favoring vectorization.
6299         // Switching to the TTI interface might help a bit.
6300         // Alternative solution could be pattern-match to detect a no-op or
6301         // shuffle.
6302         unsigned UserCost = 0;
6303         for (unsigned Lane = 0; Lane < OpsWidth; Lane++) {
6304           auto *IE = cast<InsertElementInst>(InsertUses[I + Lane]);
6305           if (auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2)))
6306             UserCost += TTI->getVectorInstrCost(
6307                 Instruction::InsertElement, IE->getType(), CI->getZExtValue());
6308         }
6309         LLVM_DEBUG(dbgs() << "SLP: Compensate cost of users by: " << UserCost
6310                           << ".\n");
6311         Cost -= UserCost;
6312       }
6313 
6314       MinCost = std::min(MinCost, Cost);
6315 
6316       if (Cost < -SLPCostThreshold) {
6317         LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
6318         R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
6319                                                     cast<Instruction>(Ops[0]))
6320                                  << "SLP vectorized with cost " << ore::NV("Cost", Cost)
6321                                  << " and with tree size "
6322                                  << ore::NV("TreeSize", R.getTreeSize()));
6323 
6324         R.vectorizeTree();
6325         // Move to the next bundle.
6326         I += VF - 1;
6327         NextInst = I + 1;
6328         Changed = true;
6329       }
6330     }
6331   }
6332 
6333   if (!Changed && CandidateFound) {
6334     R.getORE()->emit([&]() {
6335       return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0)
6336              << "List vectorization was possible but not beneficial with cost "
6337              << ore::NV("Cost", MinCost) << " >= "
6338              << ore::NV("Treshold", -SLPCostThreshold);
6339     });
6340   } else if (!Changed) {
6341     R.getORE()->emit([&]() {
6342       return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0)
6343              << "Cannot SLP vectorize list: vectorization was impossible"
6344              << " with available vectorization factors";
6345     });
6346   }
6347   return Changed;
6348 }
6349 
6350 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
6351   if (!I)
6352     return false;
6353 
6354   if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I))
6355     return false;
6356 
6357   Value *P = I->getParent();
6358 
6359   // Vectorize in current basic block only.
6360   auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
6361   auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
6362   if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
6363     return false;
6364 
6365   // Try to vectorize V.
6366   if (tryToVectorizePair(Op0, Op1, R))
6367     return true;
6368 
6369   auto *A = dyn_cast<BinaryOperator>(Op0);
6370   auto *B = dyn_cast<BinaryOperator>(Op1);
6371   // Try to skip B.
6372   if (B && B->hasOneUse()) {
6373     auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
6374     auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
6375     if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R))
6376       return true;
6377     if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R))
6378       return true;
6379   }
6380 
6381   // Try to skip A.
6382   if (A && A->hasOneUse()) {
6383     auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
6384     auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
6385     if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R))
6386       return true;
6387     if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R))
6388       return true;
6389   }
6390   return false;
6391 }
6392 
6393 /// Generate a shuffle mask to be used in a reduction tree.
6394 ///
6395 /// \param VecLen The length of the vector to be reduced.
6396 /// \param NumEltsToRdx The number of elements that should be reduced in the
6397 ///        vector.
6398 /// \param IsPairwise Whether the reduction is a pairwise or splitting
6399 ///        reduction. A pairwise reduction will generate a mask of
6400 ///        <0,2,...> or <1,3,..> while a splitting reduction will generate
6401 ///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
6402 /// \param IsLeft True will generate a mask of even elements, odd otherwise.
6403 static SmallVector<int, 32> createRdxShuffleMask(unsigned VecLen,
6404                                                  unsigned NumEltsToRdx,
6405                                                  bool IsPairwise, bool IsLeft) {
6406   assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");
6407 
6408   SmallVector<int, 32> ShuffleMask(VecLen, -1);
6409 
6410   if (IsPairwise)
6411     // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
6412     for (unsigned i = 0; i != NumEltsToRdx; ++i)
6413       ShuffleMask[i] = 2 * i + !IsLeft;
6414   else
6415     // Move the upper half of the vector to the lower half.
6416     for (unsigned i = 0; i != NumEltsToRdx; ++i)
6417       ShuffleMask[i] = NumEltsToRdx + i;
6418 
6419   return ShuffleMask;
6420 }
6421 
6422 namespace {
6423 
6424 /// Model horizontal reductions.
6425 ///
6426 /// A horizontal reduction is a tree of reduction operations (currently add and
6427 /// fadd) that has operations that can be put into a vector as its leaf.
6428 /// For example, this tree:
6429 ///
6430 /// mul mul mul mul
6431 ///  \  /    \  /
6432 ///   +       +
6433 ///    \     /
6434 ///       +
6435 /// This tree has "mul" as its reduced values and "+" as its reduction
6436 /// operations. A reduction might be feeding into a store or a binary operation
6437 /// feeding a phi.
6438 ///    ...
6439 ///    \  /
6440 ///     +
6441 ///     |
6442 ///  phi +=
6443 ///
6444 ///  Or:
6445 ///    ...
6446 ///    \  /
6447 ///     +
6448 ///     |
6449 ///   *p =
6450 ///
6451 class HorizontalReduction {
6452   using ReductionOpsType = SmallVector<Value *, 16>;
6453   using ReductionOpsListType = SmallVector<ReductionOpsType, 2>;
6454   ReductionOpsListType  ReductionOps;
6455   SmallVector<Value *, 32> ReducedVals;
6456   // Use map vector to make stable output.
6457   MapVector<Instruction *, Value *> ExtraArgs;
6458 
6459   /// Kind of the reduction data.
6460   enum ReductionKind {
6461     RK_None,       /// Not a reduction.
6462     RK_Arithmetic, /// Binary reduction data.
6463     RK_SMin,       /// Signed minimum reduction data.
6464     RK_UMin,       /// Unsigned minimum reduction data.
6465     RK_SMax,       /// Signed maximum reduction data.
6466     RK_UMax,       /// Unsigned maximum reduction data.
6467   };
6468 
6469   /// Contains info about operation, like its opcode, left and right operands.
6470   class OperationData {
6471     /// Opcode of the instruction.
6472     unsigned Opcode = 0;
6473 
6474     /// Kind of the reduction operation.
6475     ReductionKind Kind = RK_None;
6476 
6477     /// Checks if the reduction operation can be vectorized.
6478     bool isVectorizable() const {
6479       // We currently only support add/mul/logical && min/max reductions.
6480       return ((Kind == RK_Arithmetic &&
6481                (Opcode == Instruction::Add || Opcode == Instruction::FAdd ||
6482                 Opcode == Instruction::Mul || Opcode == Instruction::FMul ||
6483                 Opcode == Instruction::And || Opcode == Instruction::Or ||
6484                 Opcode == Instruction::Xor)) ||
6485               (Opcode == Instruction::ICmp &&
6486                (Kind == RK_SMin || Kind == RK_SMax ||
6487                 Kind == RK_UMin || Kind == RK_UMax)));
6488     }
6489 
6490     /// Creates reduction operation with the current opcode.
6491     Value *createOp(IRBuilder<> &Builder, Value *LHS, Value *RHS,
6492                     const Twine &Name) const {
6493       assert(isVectorizable() &&
6494              "Expected add|fadd or min/max reduction operation.");
6495       Value *Cmp = nullptr;
6496       switch (Kind) {
6497       case RK_Arithmetic:
6498         return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, LHS, RHS,
6499                                    Name);
6500       case RK_SMin:
6501         assert(Opcode == Instruction::ICmp && "Expected integer types.");
6502         Cmp = Builder.CreateICmpSLT(LHS, RHS);
6503         return Builder.CreateSelect(Cmp, LHS, RHS, Name);
6504       case RK_SMax:
6505         assert(Opcode == Instruction::ICmp && "Expected integer types.");
6506         Cmp = Builder.CreateICmpSGT(LHS, RHS);
6507         return Builder.CreateSelect(Cmp, LHS, RHS, Name);
6508       case RK_UMin:
6509         assert(Opcode == Instruction::ICmp && "Expected integer types.");
6510         Cmp = Builder.CreateICmpULT(LHS, RHS);
6511         return Builder.CreateSelect(Cmp, LHS, RHS, Name);
6512       case RK_UMax:
6513         assert(Opcode == Instruction::ICmp && "Expected integer types.");
6514         Cmp = Builder.CreateICmpUGT(LHS, RHS);
6515         return Builder.CreateSelect(Cmp, LHS, RHS, Name);
6516       case RK_None:
6517         break;
6518       }
6519       llvm_unreachable("Unknown reduction operation.");
6520     }
6521 
6522   public:
6523     explicit OperationData() = default;
6524 
6525     /// Construction for reduced values. They are identified by opcode only and
6526     /// don't have associated LHS/RHS values.
6527     explicit OperationData(Instruction &I) {
6528       Opcode = I.getOpcode();
6529     }
6530 
6531     /// Constructor for reduction operations with opcode and its left and
6532     /// right operands.
6533     OperationData(unsigned Opcode, ReductionKind Kind)
6534         : Opcode(Opcode), Kind(Kind) {
6535       assert(Kind != RK_None && "One of the reduction operations is expected.");
6536     }
6537 
6538     explicit operator bool() const { return Opcode; }
6539 
6540     /// Return true if this operation is any kind of minimum or maximum.
6541     bool isMinMax() const {
6542       switch (Kind) {
6543       case RK_Arithmetic:
6544         return false;
6545       case RK_SMin:
6546       case RK_SMax:
6547       case RK_UMin:
6548       case RK_UMax:
6549         return true;
6550       case RK_None:
6551         break;
6552       }
6553       llvm_unreachable("Reduction kind is not set");
6554     }
6555 
6556     /// Get the index of the first operand.
6557     unsigned getFirstOperandIndex() const {
6558       assert(!!*this && "The opcode is not set.");
6559       // We allow calling this before 'Kind' is set, so handle that specially.
6560       if (Kind == RK_None)
6561         return 0;
6562       return isMinMax() ? 1 : 0;
6563     }
6564 
6565     /// Total number of operands in the reduction operation.
6566     unsigned getNumberOfOperands() const {
6567       assert(Kind != RK_None && !!*this && "Expected reduction operation.");
6568       return isMinMax() ? 3 : 2;
6569     }
6570 
6571     /// Checks if the instruction is in basic block \p BB.
6572     /// For a min/max reduction check that both compare and select are in \p BB.
6573     bool hasSameParent(Instruction *I, BasicBlock *BB, bool IsRedOp) const {
6574       assert(Kind != RK_None && !!*this && "Expected reduction operation.");
6575       if (IsRedOp && isMinMax()) {
6576         auto *Cmp = cast<Instruction>(cast<SelectInst>(I)->getCondition());
6577         return I->getParent() == BB && Cmp && Cmp->getParent() == BB;
6578       }
6579       return I->getParent() == BB;
6580     }
6581 
6582     /// Expected number of uses for reduction operations/reduced values.
6583     bool hasRequiredNumberOfUses(Instruction *I, bool IsReductionOp) const {
6584       assert(Kind != RK_None && !!*this && "Expected reduction operation.");
6585       // SelectInst must be used twice while the condition op must have single
6586       // use only.
6587       if (isMinMax())
6588         return I->hasNUses(2) &&
6589                (!IsReductionOp ||
6590                 cast<SelectInst>(I)->getCondition()->hasOneUse());
6591 
6592       // Arithmetic reduction operation must be used once only.
6593       return I->hasOneUse();
6594     }
6595 
6596     /// Initializes the list of reduction operations.
6597     void initReductionOps(ReductionOpsListType &ReductionOps) {
6598       assert(Kind != RK_None && !!*this && "Expected reduction operation.");
6599       if (isMinMax())
6600         ReductionOps.assign(2, ReductionOpsType());
6601       else
6602         ReductionOps.assign(1, ReductionOpsType());
6603     }
6604 
6605     /// Add all reduction operations for the reduction instruction \p I.
6606     void addReductionOps(Instruction *I, ReductionOpsListType &ReductionOps) {
6607       assert(Kind != RK_None && !!*this && "Expected reduction operation.");
6608       if (isMinMax()) {
6609         ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition());
6610         ReductionOps[1].emplace_back(I);
6611       } else {
6612         ReductionOps[0].emplace_back(I);
6613       }
6614     }
6615 
6616     /// Checks if instruction is associative and can be vectorized.
6617     bool isAssociative(Instruction *I) const {
6618       assert(Kind != RK_None && *this && "Expected reduction operation.");
6619       switch (Kind) {
6620       case RK_Arithmetic:
6621         return I->isAssociative();
6622       case RK_SMin:
6623       case RK_SMax:
6624       case RK_UMin:
6625       case RK_UMax:
6626         assert(Opcode == Instruction::ICmp &&
6627                "Only integer compare operation is expected.");
6628         return true;
6629       case RK_None:
6630         break;
6631       }
6632       llvm_unreachable("Reduction kind is not set");
6633     }
6634 
6635     /// Checks if the reduction operation can be vectorized.
6636     bool isVectorizable(Instruction *I) const {
6637       return isVectorizable() && isAssociative(I);
6638     }
6639 
6640     /// Checks if two operation data are both a reduction op or both a reduced
6641     /// value.
6642     bool operator==(const OperationData &OD) const {
6643       assert(((Kind != OD.Kind) || (Opcode != 0 && OD.Opcode != 0)) &&
6644              "One of the comparing operations is incorrect.");
6645       return Kind == OD.Kind && Opcode == OD.Opcode;
6646     }
6647     bool operator!=(const OperationData &OD) const { return !(*this == OD); }
6648     void clear() {
6649       Opcode = 0;
6650       Kind = RK_None;
6651     }
6652 
6653     /// Get the opcode of the reduction operation.
6654     unsigned getOpcode() const {
6655       assert(isVectorizable() && "Expected vectorizable operation.");
6656       return Opcode;
6657     }
6658 
6659     /// Get kind of reduction data.
6660     ReductionKind getKind() const { return Kind; }
6661     Value *getLHS(Instruction *I) const {
6662       if (Kind == RK_None)
6663         return nullptr;
6664       return I->getOperand(getFirstOperandIndex());
6665     }
6666     Value *getRHS(Instruction *I) const {
6667       if (Kind == RK_None)
6668         return nullptr;
6669       return I->getOperand(getFirstOperandIndex() + 1);
6670     }
6671 
6672     /// Creates reduction operation with the current opcode with the IR flags
6673     /// from \p ReductionOps.
6674     Value *createOp(IRBuilder<> &Builder, Value *LHS, Value *RHS,
6675                     const Twine &Name,
6676                     const ReductionOpsListType &ReductionOps) const {
6677       assert(isVectorizable() &&
6678              "Expected add|fadd or min/max reduction operation.");
6679       auto *Op = createOp(Builder, LHS, RHS, Name);
6680       switch (Kind) {
6681       case RK_Arithmetic:
6682         propagateIRFlags(Op, ReductionOps[0]);
6683         return Op;
6684       case RK_SMin:
6685       case RK_SMax:
6686       case RK_UMin:
6687       case RK_UMax:
6688         if (auto *SI = dyn_cast<SelectInst>(Op))
6689           propagateIRFlags(SI->getCondition(), ReductionOps[0]);
6690         propagateIRFlags(Op, ReductionOps[1]);
6691         return Op;
6692       case RK_None:
6693         break;
6694       }
6695       llvm_unreachable("Unknown reduction operation.");
6696     }
6697     /// Creates reduction operation with the current opcode with the IR flags
6698     /// from \p I.
6699     Value *createOp(IRBuilder<> &Builder, Value *LHS, Value *RHS,
6700                     const Twine &Name, Instruction *I) const {
6701       assert(isVectorizable() &&
6702              "Expected add|fadd or min/max reduction operation.");
6703       auto *Op = createOp(Builder, LHS, RHS, Name);
6704       switch (Kind) {
6705       case RK_Arithmetic:
6706         propagateIRFlags(Op, I);
6707         return Op;
6708       case RK_SMin:
6709       case RK_SMax:
6710       case RK_UMin:
6711       case RK_UMax:
6712         if (auto *SI = dyn_cast<SelectInst>(Op)) {
6713           propagateIRFlags(SI->getCondition(),
6714                            cast<SelectInst>(I)->getCondition());
6715         }
6716         propagateIRFlags(Op, I);
6717         return Op;
6718       case RK_None:
6719         break;
6720       }
6721       llvm_unreachable("Unknown reduction operation.");
6722     }
6723 
6724     TargetTransformInfo::ReductionFlags getFlags() const {
6725       TargetTransformInfo::ReductionFlags Flags;
6726       switch (Kind) {
6727       case RK_Arithmetic:
6728         break;
6729       case RK_SMin:
6730         Flags.IsSigned = true;
6731         Flags.IsMaxOp = false;
6732         break;
6733       case RK_SMax:
6734         Flags.IsSigned = true;
6735         Flags.IsMaxOp = true;
6736         break;
6737       case RK_UMin:
6738         Flags.IsSigned = false;
6739         Flags.IsMaxOp = false;
6740         break;
6741       case RK_UMax:
6742         Flags.IsSigned = false;
6743         Flags.IsMaxOp = true;
6744         break;
6745       case RK_None:
6746         llvm_unreachable("Reduction kind is not set");
6747       }
6748       return Flags;
6749     }
6750   };
6751 
6752   WeakTrackingVH ReductionRoot;
6753 
6754   /// The operation data of the reduction operation.
6755   OperationData ReductionData;
6756 
6757   /// The operation data of the values we perform a reduction on.
6758   OperationData ReducedValueData;
6759 
6760   /// Should we model this reduction as a pairwise reduction tree or a tree that
6761   /// splits the vector in halves and adds those halves.
6762   bool IsPairwiseReduction = false;
6763 
6764   /// Checks if the ParentStackElem.first should be marked as a reduction
6765   /// operation with an extra argument or as extra argument itself.
6766   void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem,
6767                     Value *ExtraArg) {
6768     if (ExtraArgs.count(ParentStackElem.first)) {
6769       ExtraArgs[ParentStackElem.first] = nullptr;
6770       // We ran into something like:
6771       // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg.
6772       // The whole ParentStackElem.first should be considered as an extra value
6773       // in this case.
6774       // Do not perform analysis of remaining operands of ParentStackElem.first
6775       // instruction, this whole instruction is an extra argument.
6776       ParentStackElem.second = ParentStackElem.first->getNumOperands();
6777     } else {
6778       // We ran into something like:
6779       // ParentStackElem.first += ... + ExtraArg + ...
6780       ExtraArgs[ParentStackElem.first] = ExtraArg;
6781     }
6782   }
6783 
6784   static OperationData getOperationData(Instruction *I) {
6785     if (!I)
6786       return OperationData();
6787 
6788     Value *LHS;
6789     Value *RHS;
6790     if (m_BinOp(m_Value(LHS), m_Value(RHS)).match(I)) {
6791       return OperationData(cast<BinaryOperator>(I)->getOpcode(), RK_Arithmetic);
6792     }
6793     if (auto *Select = dyn_cast<SelectInst>(I)) {
6794       // Look for a min/max pattern.
6795       if (m_UMin(m_Value(LHS), m_Value(RHS)).match(Select)) {
6796         return OperationData(Instruction::ICmp, RK_UMin);
6797       } else if (m_SMin(m_Value(LHS), m_Value(RHS)).match(Select)) {
6798         return OperationData(Instruction::ICmp, RK_SMin);
6799       } else if (m_UMax(m_Value(LHS), m_Value(RHS)).match(Select)) {
6800         return OperationData(Instruction::ICmp, RK_UMax);
6801       } else if (m_SMax(m_Value(LHS), m_Value(RHS)).match(Select)) {
6802         return OperationData(Instruction::ICmp, RK_SMax);
6803       } else {
6804         // Try harder: look for min/max pattern based on instructions producing
6805         // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2).
6806         // During the intermediate stages of SLP, it's very common to have
6807         // pattern like this (since optimizeGatherSequence is run only once
6808         // at the end):
6809         // %1 = extractelement <2 x i32> %a, i32 0
6810         // %2 = extractelement <2 x i32> %a, i32 1
6811         // %cond = icmp sgt i32 %1, %2
6812         // %3 = extractelement <2 x i32> %a, i32 0
6813         // %4 = extractelement <2 x i32> %a, i32 1
6814         // %select = select i1 %cond, i32 %3, i32 %4
6815         CmpInst::Predicate Pred;
6816         Instruction *L1;
6817         Instruction *L2;
6818 
6819         LHS = Select->getTrueValue();
6820         RHS = Select->getFalseValue();
6821         Value *Cond = Select->getCondition();
6822 
6823         // TODO: Support inverse predicates.
6824         if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) {
6825           if (!isa<ExtractElementInst>(RHS) ||
6826               !L2->isIdenticalTo(cast<Instruction>(RHS)))
6827             return OperationData(*I);
6828         } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) {
6829           if (!isa<ExtractElementInst>(LHS) ||
6830               !L1->isIdenticalTo(cast<Instruction>(LHS)))
6831             return OperationData(*I);
6832         } else {
6833           if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS))
6834             return OperationData(*I);
6835           if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) ||
6836               !L1->isIdenticalTo(cast<Instruction>(LHS)) ||
6837               !L2->isIdenticalTo(cast<Instruction>(RHS)))
6838             return OperationData(*I);
6839         }
6840         switch (Pred) {
6841         default:
6842           return OperationData(*I);
6843 
6844         case CmpInst::ICMP_ULT:
6845         case CmpInst::ICMP_ULE:
6846           return OperationData(Instruction::ICmp, RK_UMin);
6847 
6848         case CmpInst::ICMP_SLT:
6849         case CmpInst::ICMP_SLE:
6850           return OperationData(Instruction::ICmp, RK_SMin);
6851 
6852         case CmpInst::ICMP_UGT:
6853         case CmpInst::ICMP_UGE:
6854           return OperationData(Instruction::ICmp, RK_UMax);
6855 
6856         case CmpInst::ICMP_SGT:
6857         case CmpInst::ICMP_SGE:
6858           return OperationData(Instruction::ICmp, RK_SMax);
6859         }
6860       }
6861     }
6862     return OperationData(*I);
6863   }
6864 
6865 public:
6866   HorizontalReduction() = default;
6867 
6868   /// Try to find a reduction tree.
6869   bool matchAssociativeReduction(PHINode *Phi, Instruction *B) {
6870     assert((!Phi || is_contained(Phi->operands(), B)) &&
6871            "Thi phi needs to use the binary operator");
6872 
6873     ReductionData = getOperationData(B);
6874 
6875     // We could have a initial reductions that is not an add.
6876     //  r *= v1 + v2 + v3 + v4
6877     // In such a case start looking for a tree rooted in the first '+'.
6878     if (Phi) {
6879       if (ReductionData.getLHS(B) == Phi) {
6880         Phi = nullptr;
6881         B = dyn_cast<Instruction>(ReductionData.getRHS(B));
6882         ReductionData = getOperationData(B);
6883       } else if (ReductionData.getRHS(B) == Phi) {
6884         Phi = nullptr;
6885         B = dyn_cast<Instruction>(ReductionData.getLHS(B));
6886         ReductionData = getOperationData(B);
6887       }
6888     }
6889 
6890     if (!ReductionData.isVectorizable(B))
6891       return false;
6892 
6893     Type *Ty = B->getType();
6894     if (!isValidElementType(Ty))
6895       return false;
6896     if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy())
6897       return false;
6898 
6899     ReducedValueData.clear();
6900     ReductionRoot = B;
6901 
6902     // Post order traverse the reduction tree starting at B. We only handle true
6903     // trees containing only binary operators.
6904     SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
6905     Stack.push_back(std::make_pair(B, ReductionData.getFirstOperandIndex()));
6906     ReductionData.initReductionOps(ReductionOps);
6907     while (!Stack.empty()) {
6908       Instruction *TreeN = Stack.back().first;
6909       unsigned EdgeToVist = Stack.back().second++;
6910       OperationData OpData = getOperationData(TreeN);
6911       bool IsReducedValue = OpData != ReductionData;
6912 
6913       // Postorder vist.
6914       if (IsReducedValue || EdgeToVist == OpData.getNumberOfOperands()) {
6915         if (IsReducedValue)
6916           ReducedVals.push_back(TreeN);
6917         else {
6918           auto I = ExtraArgs.find(TreeN);
6919           if (I != ExtraArgs.end() && !I->second) {
6920             // Check if TreeN is an extra argument of its parent operation.
6921             if (Stack.size() <= 1) {
6922               // TreeN can't be an extra argument as it is a root reduction
6923               // operation.
6924               return false;
6925             }
6926             // Yes, TreeN is an extra argument, do not add it to a list of
6927             // reduction operations.
6928             // Stack[Stack.size() - 2] always points to the parent operation.
6929             markExtraArg(Stack[Stack.size() - 2], TreeN);
6930             ExtraArgs.erase(TreeN);
6931           } else
6932             ReductionData.addReductionOps(TreeN, ReductionOps);
6933         }
6934         // Retract.
6935         Stack.pop_back();
6936         continue;
6937       }
6938 
6939       // Visit left or right.
6940       Value *NextV = TreeN->getOperand(EdgeToVist);
6941       if (NextV != Phi) {
6942         auto *I = dyn_cast<Instruction>(NextV);
6943         OpData = getOperationData(I);
6944         // Continue analysis if the next operand is a reduction operation or
6945         // (possibly) a reduced value. If the reduced value opcode is not set,
6946         // the first met operation != reduction operation is considered as the
6947         // reduced value class.
6948         if (I && (!ReducedValueData || OpData == ReducedValueData ||
6949                   OpData == ReductionData)) {
6950           const bool IsReductionOperation = OpData == ReductionData;
6951           // Only handle trees in the current basic block.
6952           if (!ReductionData.hasSameParent(I, B->getParent(),
6953                                            IsReductionOperation)) {
6954             // I is an extra argument for TreeN (its parent operation).
6955             markExtraArg(Stack.back(), I);
6956             continue;
6957           }
6958 
6959           // Each tree node needs to have minimal number of users except for the
6960           // ultimate reduction.
6961           if (!ReductionData.hasRequiredNumberOfUses(I,
6962                                                      OpData == ReductionData) &&
6963               I != B) {
6964             // I is an extra argument for TreeN (its parent operation).
6965             markExtraArg(Stack.back(), I);
6966             continue;
6967           }
6968 
6969           if (IsReductionOperation) {
6970             // We need to be able to reassociate the reduction operations.
6971             if (!OpData.isAssociative(I)) {
6972               // I is an extra argument for TreeN (its parent operation).
6973               markExtraArg(Stack.back(), I);
6974               continue;
6975             }
6976           } else if (ReducedValueData &&
6977                      ReducedValueData != OpData) {
6978             // Make sure that the opcodes of the operations that we are going to
6979             // reduce match.
6980             // I is an extra argument for TreeN (its parent operation).
6981             markExtraArg(Stack.back(), I);
6982             continue;
6983           } else if (!ReducedValueData)
6984             ReducedValueData = OpData;
6985 
6986           Stack.push_back(std::make_pair(I, OpData.getFirstOperandIndex()));
6987           continue;
6988         }
6989       }
6990       // NextV is an extra argument for TreeN (its parent operation).
6991       markExtraArg(Stack.back(), NextV);
6992     }
6993     return true;
6994   }
6995 
6996   /// Attempt to vectorize the tree found by matchAssociativeReduction.
6997   bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
6998     // If there are a sufficient number of reduction values, reduce
6999     // to a nearby power-of-2. We can safely generate oversized
7000     // vectors and rely on the backend to split them to legal sizes.
7001     unsigned NumReducedVals = ReducedVals.size();
7002     if (NumReducedVals < 4)
7003       return false;
7004 
7005     // FIXME: Fast-math-flags should be set based on the instructions in the
7006     //        reduction (not all of 'fast' are required).
7007     IRBuilder<> Builder(cast<Instruction>(ReductionRoot));
7008     FastMathFlags Unsafe;
7009     Unsafe.setFast();
7010     Builder.setFastMathFlags(Unsafe);
7011 
7012     BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
7013     // The same extra argument may be used several times, so log each attempt
7014     // to use it.
7015     for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) {
7016       assert(Pair.first && "DebugLoc must be set.");
7017       ExternallyUsedValues[Pair.second].push_back(Pair.first);
7018     }
7019 
7020     // The compare instruction of a min/max is the insertion point for new
7021     // instructions and may be replaced with a new compare instruction.
7022     auto getCmpForMinMaxReduction = [](Instruction *RdxRootInst) {
7023       assert(isa<SelectInst>(RdxRootInst) &&
7024              "Expected min/max reduction to have select root instruction");
7025       Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition();
7026       assert(isa<Instruction>(ScalarCond) &&
7027              "Expected min/max reduction to have compare condition");
7028       return cast<Instruction>(ScalarCond);
7029     };
7030 
7031     // The reduction root is used as the insertion point for new instructions,
7032     // so set it as externally used to prevent it from being deleted.
7033     ExternallyUsedValues[ReductionRoot];
7034     SmallVector<Value *, 16> IgnoreList;
7035     for (ReductionOpsType &RdxOp : ReductionOps)
7036       IgnoreList.append(RdxOp.begin(), RdxOp.end());
7037 
7038     unsigned ReduxWidth = PowerOf2Floor(NumReducedVals);
7039     if (NumReducedVals > ReduxWidth) {
7040       // In the loop below, we are building a tree based on a window of
7041       // 'ReduxWidth' values.
7042       // If the operands of those values have common traits (compare predicate,
7043       // constant operand, etc), then we want to group those together to
7044       // minimize the cost of the reduction.
7045 
7046       // TODO: This should be extended to count common operands for
7047       //       compares and binops.
7048 
7049       // Step 1: Count the number of times each compare predicate occurs.
7050       SmallDenseMap<unsigned, unsigned> PredCountMap;
7051       for (Value *RdxVal : ReducedVals) {
7052         CmpInst::Predicate Pred;
7053         if (match(RdxVal, m_Cmp(Pred, m_Value(), m_Value())))
7054           ++PredCountMap[Pred];
7055       }
7056       // Step 2: Sort the values so the most common predicates come first.
7057       stable_sort(ReducedVals, [&PredCountMap](Value *A, Value *B) {
7058         CmpInst::Predicate PredA, PredB;
7059         if (match(A, m_Cmp(PredA, m_Value(), m_Value())) &&
7060             match(B, m_Cmp(PredB, m_Value(), m_Value()))) {
7061           return PredCountMap[PredA] > PredCountMap[PredB];
7062         }
7063         return false;
7064       });
7065     }
7066 
7067     Value *VectorizedTree = nullptr;
7068     unsigned i = 0;
7069     while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) {
7070       ArrayRef<Value *> VL(&ReducedVals[i], ReduxWidth);
7071       V.buildTree(VL, ExternallyUsedValues, IgnoreList);
7072       Optional<ArrayRef<unsigned>> Order = V.bestOrder();
7073       if (Order) {
7074         assert(Order->size() == VL.size() &&
7075                "Order size must be the same as number of vectorized "
7076                "instructions.");
7077         // TODO: reorder tree nodes without tree rebuilding.
7078         SmallVector<Value *, 4> ReorderedOps(VL.size());
7079         llvm::transform(*Order, ReorderedOps.begin(),
7080                         [VL](const unsigned Idx) { return VL[Idx]; });
7081         V.buildTree(ReorderedOps, ExternallyUsedValues, IgnoreList);
7082       }
7083       if (V.isTreeTinyAndNotFullyVectorizable())
7084         break;
7085       if (V.isLoadCombineReductionCandidate(ReductionData.getOpcode()))
7086         break;
7087 
7088       V.computeMinimumValueSizes();
7089 
7090       // Estimate cost.
7091       int TreeCost = V.getTreeCost();
7092       int ReductionCost = getReductionCost(TTI, ReducedVals[i], ReduxWidth);
7093       int Cost = TreeCost + ReductionCost;
7094       if (Cost >= -SLPCostThreshold) {
7095         V.getORE()->emit([&]() {
7096           return OptimizationRemarkMissed(SV_NAME, "HorSLPNotBeneficial",
7097                                           cast<Instruction>(VL[0]))
7098                  << "Vectorizing horizontal reduction is possible"
7099                  << "but not beneficial with cost " << ore::NV("Cost", Cost)
7100                  << " and threshold "
7101                  << ore::NV("Threshold", -SLPCostThreshold);
7102         });
7103         break;
7104       }
7105 
7106       LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
7107                         << Cost << ". (HorRdx)\n");
7108       V.getORE()->emit([&]() {
7109         return OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction",
7110                                   cast<Instruction>(VL[0]))
7111                << "Vectorized horizontal reduction with cost "
7112                << ore::NV("Cost", Cost) << " and with tree size "
7113                << ore::NV("TreeSize", V.getTreeSize());
7114       });
7115 
7116       // Vectorize a tree.
7117       DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
7118       Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);
7119 
7120       // Emit a reduction. For min/max, the root is a select, but the insertion
7121       // point is the compare condition of that select.
7122       Instruction *RdxRootInst = cast<Instruction>(ReductionRoot);
7123       if (ReductionData.isMinMax())
7124         Builder.SetInsertPoint(getCmpForMinMaxReduction(RdxRootInst));
7125       else
7126         Builder.SetInsertPoint(RdxRootInst);
7127 
7128       Value *ReducedSubTree =
7129           emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);
7130 
7131       if (!VectorizedTree) {
7132         // Initialize the final value in the reduction.
7133         VectorizedTree = ReducedSubTree;
7134       } else {
7135         // Update the final value in the reduction.
7136         Builder.SetCurrentDebugLocation(Loc);
7137         VectorizedTree = ReductionData.createOp(
7138             Builder, VectorizedTree, ReducedSubTree, "op.rdx", ReductionOps);
7139       }
7140       i += ReduxWidth;
7141       ReduxWidth = PowerOf2Floor(NumReducedVals - i);
7142     }
7143 
7144     if (VectorizedTree) {
7145       // Finish the reduction.
7146       for (; i < NumReducedVals; ++i) {
7147         auto *I = cast<Instruction>(ReducedVals[i]);
7148         Builder.SetCurrentDebugLocation(I->getDebugLoc());
7149         VectorizedTree = ReductionData.createOp(Builder, VectorizedTree, I, "",
7150                                                 ReductionOps);
7151       }
7152       for (auto &Pair : ExternallyUsedValues) {
7153         // Add each externally used value to the final reduction.
7154         for (auto *I : Pair.second) {
7155           Builder.SetCurrentDebugLocation(I->getDebugLoc());
7156           VectorizedTree = ReductionData.createOp(Builder, VectorizedTree,
7157                                                   Pair.first, "op.extra", I);
7158         }
7159       }
7160 
7161       // Update users. For a min/max reduction that ends with a compare and
7162       // select, we also have to RAUW for the compare instruction feeding the
7163       // reduction root. That's because the original compare may have extra uses
7164       // besides the final select of the reduction.
7165       if (ReductionData.isMinMax()) {
7166         if (auto *VecSelect = dyn_cast<SelectInst>(VectorizedTree)) {
7167           Instruction *ScalarCmp =
7168               getCmpForMinMaxReduction(cast<Instruction>(ReductionRoot));
7169           ScalarCmp->replaceAllUsesWith(VecSelect->getCondition());
7170         }
7171       }
7172       ReductionRoot->replaceAllUsesWith(VectorizedTree);
7173 
7174       // Mark all scalar reduction ops for deletion, they are replaced by the
7175       // vector reductions.
7176       V.eraseInstructions(IgnoreList);
7177     }
7178     return VectorizedTree != nullptr;
7179   }
7180 
7181   unsigned numReductionValues() const {
7182     return ReducedVals.size();
7183   }
7184 
7185 private:
7186   /// Calculate the cost of a reduction.
7187   int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal,
7188                        unsigned ReduxWidth) {
7189     Type *ScalarTy = FirstReducedVal->getType();
7190     auto *VecTy = FixedVectorType::get(ScalarTy, ReduxWidth);
7191 
7192     int PairwiseRdxCost;
7193     int SplittingRdxCost;
7194     switch (ReductionData.getKind()) {
7195     case RK_Arithmetic:
7196       PairwiseRdxCost =
7197           TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy,
7198                                           /*IsPairwiseForm=*/true);
7199       SplittingRdxCost =
7200           TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy,
7201                                           /*IsPairwiseForm=*/false);
7202       break;
7203     case RK_SMin:
7204     case RK_SMax:
7205     case RK_UMin:
7206     case RK_UMax: {
7207       auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VecTy));
7208       bool IsUnsigned = ReductionData.getKind() == RK_UMin ||
7209                         ReductionData.getKind() == RK_UMax;
7210       PairwiseRdxCost =
7211           TTI->getMinMaxReductionCost(VecTy, VecCondTy,
7212                                       /*IsPairwiseForm=*/true, IsUnsigned);
7213       SplittingRdxCost =
7214           TTI->getMinMaxReductionCost(VecTy, VecCondTy,
7215                                       /*IsPairwiseForm=*/false, IsUnsigned);
7216       break;
7217     }
7218     case RK_None:
7219       llvm_unreachable("Expected arithmetic or min/max reduction operation");
7220     }
7221 
7222     IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
7223     int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;
7224 
7225     int ScalarReduxCost = 0;
7226     switch (ReductionData.getKind()) {
7227     case RK_Arithmetic:
7228       ScalarReduxCost =
7229           TTI->getArithmeticInstrCost(ReductionData.getOpcode(), ScalarTy);
7230       break;
7231     case RK_SMin:
7232     case RK_SMax:
7233     case RK_UMin:
7234     case RK_UMax:
7235       ScalarReduxCost =
7236           TTI->getCmpSelInstrCost(ReductionData.getOpcode(), ScalarTy) +
7237           TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
7238                                   CmpInst::makeCmpResultType(ScalarTy));
7239       break;
7240     case RK_None:
7241       llvm_unreachable("Expected arithmetic or min/max reduction operation");
7242     }
7243     ScalarReduxCost *= (ReduxWidth - 1);
7244 
7245     LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
7246                       << " for reduction that starts with " << *FirstReducedVal
7247                       << " (It is a "
7248                       << (IsPairwiseReduction ? "pairwise" : "splitting")
7249                       << " reduction)\n");
7250 
7251     return VecReduxCost - ScalarReduxCost;
7252   }
7253 
7254   /// Emit a horizontal reduction of the vectorized value.
7255   Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder,
7256                        unsigned ReduxWidth, const TargetTransformInfo *TTI) {
7257     assert(VectorizedValue && "Need to have a vectorized tree node");
7258     assert(isPowerOf2_32(ReduxWidth) &&
7259            "We only handle power-of-two reductions for now");
7260 
7261     if (!IsPairwiseReduction) {
7262       // FIXME: The builder should use an FMF guard. It should not be hard-coded
7263       //        to 'fast'.
7264       assert(Builder.getFastMathFlags().isFast() && "Expected 'fast' FMF");
7265       return createSimpleTargetReduction(
7266           Builder, TTI, ReductionData.getOpcode(), VectorizedValue,
7267           ReductionData.getFlags(), ReductionOps.back());
7268     }
7269 
7270     Value *TmpVec = VectorizedValue;
7271     for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
7272       auto LeftMask = createRdxShuffleMask(ReduxWidth, i, true, true);
7273       auto RightMask = createRdxShuffleMask(ReduxWidth, i, true, false);
7274 
7275       Value *LeftShuf =
7276           Builder.CreateShuffleVector(TmpVec, LeftMask, "rdx.shuf.l");
7277       Value *RightShuf =
7278           Builder.CreateShuffleVector(TmpVec, RightMask, "rdx.shuf.r");
7279       TmpVec = ReductionData.createOp(Builder, LeftShuf, RightShuf, "op.rdx",
7280                                       ReductionOps);
7281     }
7282 
7283     // The result is in the first element of the vector.
7284     return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
7285   }
7286 };
7287 
7288 } // end anonymous namespace
7289 
7290 static Optional<unsigned> getAggregateSize(Instruction *InsertInst) {
7291   if (auto *IE = dyn_cast<InsertElementInst>(InsertInst))
7292     return cast<FixedVectorType>(IE->getType())->getNumElements();
7293 
7294   unsigned AggregateSize = 1;
7295   auto *IV = cast<InsertValueInst>(InsertInst);
7296   Type *CurrentType = IV->getType();
7297   do {
7298     if (auto *ST = dyn_cast<StructType>(CurrentType)) {
7299       for (auto *Elt : ST->elements())
7300         if (Elt != ST->getElementType(0)) // check homogeneity
7301           return None;
7302       AggregateSize *= ST->getNumElements();
7303       CurrentType = ST->getElementType(0);
7304     } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
7305       AggregateSize *= AT->getNumElements();
7306       CurrentType = AT->getElementType();
7307     } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) {
7308       AggregateSize *= VT->getNumElements();
7309       return AggregateSize;
7310     } else if (CurrentType->isSingleValueType()) {
7311       return AggregateSize;
7312     } else {
7313       return None;
7314     }
7315   } while (true);
7316 }
7317 
7318 static Optional<unsigned> getOperandIndex(Instruction *InsertInst,
7319                                           unsigned OperandOffset) {
7320   unsigned OperandIndex = OperandOffset;
7321   if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
7322     if (auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2))) {
7323       auto *VT = cast<FixedVectorType>(IE->getType());
7324       OperandIndex *= VT->getNumElements();
7325       OperandIndex += CI->getZExtValue();
7326       return OperandIndex;
7327     }
7328     return None;
7329   }
7330 
7331   auto *IV = cast<InsertValueInst>(InsertInst);
7332   Type *CurrentType = IV->getType();
7333   for (unsigned int Index : IV->indices()) {
7334     if (auto *ST = dyn_cast<StructType>(CurrentType)) {
7335       OperandIndex *= ST->getNumElements();
7336       CurrentType = ST->getElementType(Index);
7337     } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
7338       OperandIndex *= AT->getNumElements();
7339       CurrentType = AT->getElementType();
7340     } else {
7341       return None;
7342     }
7343     OperandIndex += Index;
7344   }
7345   return OperandIndex;
7346 }
7347 
7348 static bool findBuildAggregate_rec(Instruction *LastInsertInst,
7349                                    TargetTransformInfo *TTI,
7350                                    SmallVectorImpl<Value *> &BuildVectorOpds,
7351                                    SmallVectorImpl<Value *> &InsertElts,
7352                                    unsigned OperandOffset) {
7353   do {
7354     Value *InsertedOperand = LastInsertInst->getOperand(1);
7355     Optional<unsigned> OperandIndex =
7356         getOperandIndex(LastInsertInst, OperandOffset);
7357     if (!OperandIndex)
7358       return false;
7359     if (isa<InsertElementInst>(InsertedOperand) ||
7360         isa<InsertValueInst>(InsertedOperand)) {
7361       if (!findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI,
7362                                   BuildVectorOpds, InsertElts, *OperandIndex))
7363         return false;
7364     } else {
7365       BuildVectorOpds[*OperandIndex] = InsertedOperand;
7366       InsertElts[*OperandIndex] = LastInsertInst;
7367     }
7368     if (isa<UndefValue>(LastInsertInst->getOperand(0)))
7369       return true;
7370     LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0));
7371   } while (LastInsertInst != nullptr &&
7372            (isa<InsertValueInst>(LastInsertInst) ||
7373             isa<InsertElementInst>(LastInsertInst)) &&
7374            LastInsertInst->hasOneUse());
7375   return false;
7376 }
7377 
7378 /// Recognize construction of vectors like
7379 ///  %ra = insertelement <4 x float> undef, float %s0, i32 0
7380 ///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
7381 ///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
7382 ///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
7383 ///  starting from the last insertelement or insertvalue instruction.
7384 ///
7385 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>},
7386 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on.
7387 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples.
7388 ///
7389 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type.
7390 ///
7391 /// \return true if it matches.
7392 static bool findBuildAggregate(Instruction *LastInsertInst,
7393                                TargetTransformInfo *TTI,
7394                                SmallVectorImpl<Value *> &BuildVectorOpds,
7395                                SmallVectorImpl<Value *> &InsertElts) {
7396 
7397   assert((isa<InsertElementInst>(LastInsertInst) ||
7398           isa<InsertValueInst>(LastInsertInst)) &&
7399          "Expected insertelement or insertvalue instruction!");
7400 
7401   assert((BuildVectorOpds.empty() && InsertElts.empty()) &&
7402          "Expected empty result vectors!");
7403 
7404   Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst);
7405   if (!AggregateSize)
7406     return false;
7407   BuildVectorOpds.resize(*AggregateSize);
7408   InsertElts.resize(*AggregateSize);
7409 
7410   if (findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts,
7411                              0)) {
7412     llvm::erase_value(BuildVectorOpds, nullptr);
7413     llvm::erase_value(InsertElts, nullptr);
7414     if (BuildVectorOpds.size() >= 2)
7415       return true;
7416   }
7417 
7418   return false;
7419 }
7420 
7421 static bool PhiTypeSorterFunc(Value *V, Value *V2) {
7422   return V->getType() < V2->getType();
7423 }
7424 
7425 /// Try and get a reduction value from a phi node.
7426 ///
7427 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions
7428 /// if they come from either \p ParentBB or a containing loop latch.
7429 ///
7430 /// \returns A candidate reduction value if possible, or \code nullptr \endcode
7431 /// if not possible.
7432 static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
7433                                 BasicBlock *ParentBB, LoopInfo *LI) {
7434   // There are situations where the reduction value is not dominated by the
7435   // reduction phi. Vectorizing such cases has been reported to cause
7436   // miscompiles. See PR25787.
7437   auto DominatedReduxValue = [&](Value *R) {
7438     return isa<Instruction>(R) &&
7439            DT->dominates(P->getParent(), cast<Instruction>(R)->getParent());
7440   };
7441 
7442   Value *Rdx = nullptr;
7443 
7444   // Return the incoming value if it comes from the same BB as the phi node.
7445   if (P->getIncomingBlock(0) == ParentBB) {
7446     Rdx = P->getIncomingValue(0);
7447   } else if (P->getIncomingBlock(1) == ParentBB) {
7448     Rdx = P->getIncomingValue(1);
7449   }
7450 
7451   if (Rdx && DominatedReduxValue(Rdx))
7452     return Rdx;
7453 
7454   // Otherwise, check whether we have a loop latch to look at.
7455   Loop *BBL = LI->getLoopFor(ParentBB);
7456   if (!BBL)
7457     return nullptr;
7458   BasicBlock *BBLatch = BBL->getLoopLatch();
7459   if (!BBLatch)
7460     return nullptr;
7461 
7462   // There is a loop latch, return the incoming value if it comes from
7463   // that. This reduction pattern occasionally turns up.
7464   if (P->getIncomingBlock(0) == BBLatch) {
7465     Rdx = P->getIncomingValue(0);
7466   } else if (P->getIncomingBlock(1) == BBLatch) {
7467     Rdx = P->getIncomingValue(1);
7468   }
7469 
7470   if (Rdx && DominatedReduxValue(Rdx))
7471     return Rdx;
7472 
7473   return nullptr;
7474 }
7475 
7476 /// Attempt to reduce a horizontal reduction.
7477 /// If it is legal to match a horizontal reduction feeding the phi node \a P
7478 /// with reduction operators \a Root (or one of its operands) in a basic block
7479 /// \a BB, then check if it can be done. If horizontal reduction is not found
7480 /// and root instruction is a binary operation, vectorization of the operands is
7481 /// attempted.
7482 /// \returns true if a horizontal reduction was matched and reduced or operands
7483 /// of one of the binary instruction were vectorized.
7484 /// \returns false if a horizontal reduction was not matched (or not possible)
7485 /// or no vectorization of any binary operation feeding \a Root instruction was
7486 /// performed.
7487 static bool tryToVectorizeHorReductionOrInstOperands(
7488     PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
7489     TargetTransformInfo *TTI,
7490     const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
7491   if (!ShouldVectorizeHor)
7492     return false;
7493 
7494   if (!Root)
7495     return false;
7496 
7497   if (Root->getParent() != BB || isa<PHINode>(Root))
7498     return false;
7499   // Start analysis starting from Root instruction. If horizontal reduction is
7500   // found, try to vectorize it. If it is not a horizontal reduction or
7501   // vectorization is not possible or not effective, and currently analyzed
7502   // instruction is a binary operation, try to vectorize the operands, using
7503   // pre-order DFS traversal order. If the operands were not vectorized, repeat
7504   // the same procedure considering each operand as a possible root of the
7505   // horizontal reduction.
7506   // Interrupt the process if the Root instruction itself was vectorized or all
7507   // sub-trees not higher that RecursionMaxDepth were analyzed/vectorized.
7508   SmallVector<std::pair<Instruction *, unsigned>, 8> Stack(1, {Root, 0});
7509   SmallPtrSet<Value *, 8> VisitedInstrs;
7510   bool Res = false;
7511   while (!Stack.empty()) {
7512     Instruction *Inst;
7513     unsigned Level;
7514     std::tie(Inst, Level) = Stack.pop_back_val();
7515     Value *B0, *B1;
7516     bool IsBinop = match(Inst, m_BinOp(m_Value(B0), m_Value(B1)));
7517     bool IsSelect = match(Inst, m_Select(m_Value(), m_Value(), m_Value()));
7518     if (IsBinop || IsSelect) {
7519       HorizontalReduction HorRdx;
7520       if (HorRdx.matchAssociativeReduction(P, Inst)) {
7521         if (HorRdx.tryToReduce(R, TTI)) {
7522           Res = true;
7523           // Set P to nullptr to avoid re-analysis of phi node in
7524           // matchAssociativeReduction function unless this is the root node.
7525           P = nullptr;
7526           continue;
7527         }
7528       }
7529       if (P && IsBinop) {
7530         Inst = dyn_cast<Instruction>(B0);
7531         if (Inst == P)
7532           Inst = dyn_cast<Instruction>(B1);
7533         if (!Inst) {
7534           // Set P to nullptr to avoid re-analysis of phi node in
7535           // matchAssociativeReduction function unless this is the root node.
7536           P = nullptr;
7537           continue;
7538         }
7539       }
7540     }
7541     // Set P to nullptr to avoid re-analysis of phi node in
7542     // matchAssociativeReduction function unless this is the root node.
7543     P = nullptr;
7544     if (Vectorize(Inst, R)) {
7545       Res = true;
7546       continue;
7547     }
7548 
7549     // Try to vectorize operands.
7550     // Continue analysis for the instruction from the same basic block only to
7551     // save compile time.
7552     if (++Level < RecursionMaxDepth)
7553       for (auto *Op : Inst->operand_values())
7554         if (VisitedInstrs.insert(Op).second)
7555           if (auto *I = dyn_cast<Instruction>(Op))
7556             if (!isa<PHINode>(I) && !R.isDeleted(I) && I->getParent() == BB)
7557               Stack.emplace_back(I, Level);
7558   }
7559   return Res;
7560 }
7561 
7562 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
7563                                                  BasicBlock *BB, BoUpSLP &R,
7564                                                  TargetTransformInfo *TTI) {
7565   auto *I = dyn_cast_or_null<Instruction>(V);
7566   if (!I)
7567     return false;
7568 
7569   if (!isa<BinaryOperator>(I))
7570     P = nullptr;
7571   // Try to match and vectorize a horizontal reduction.
7572   auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool {
7573     return tryToVectorize(I, R);
7574   };
7575   return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI,
7576                                                   ExtraVectorization);
7577 }
7578 
7579 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
7580                                                  BasicBlock *BB, BoUpSLP &R) {
7581   const DataLayout &DL = BB->getModule()->getDataLayout();
7582   if (!R.canMapToVector(IVI->getType(), DL))
7583     return false;
7584 
7585   SmallVector<Value *, 16> BuildVectorOpds;
7586   SmallVector<Value *, 16> BuildVectorInsts;
7587   if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts))
7588     return false;
7589 
7590   LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
7591   // Aggregate value is unlikely to be processed in vector register, we need to
7592   // extract scalars into scalar registers, so NeedExtraction is set true.
7593   return tryToVectorizeList(BuildVectorOpds, R, /*AllowReorder=*/false,
7594                             BuildVectorInsts);
7595 }
7596 
7597 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
7598                                                    BasicBlock *BB, BoUpSLP &R) {
7599   SmallVector<Value *, 16> BuildVectorInsts;
7600   SmallVector<Value *, 16> BuildVectorOpds;
7601   if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) ||
7602       (llvm::all_of(BuildVectorOpds,
7603                     [](Value *V) { return isa<ExtractElementInst>(V); }) &&
7604        isShuffle(BuildVectorOpds)))
7605     return false;
7606 
7607   // Vectorize starting with the build vector operands ignoring the BuildVector
7608   // instructions for the purpose of scheduling and user extraction.
7609   return tryToVectorizeList(BuildVectorOpds, R, /*AllowReorder=*/false,
7610                             BuildVectorInsts);
7611 }
7612 
7613 bool SLPVectorizerPass::vectorizeCmpInst(CmpInst *CI, BasicBlock *BB,
7614                                          BoUpSLP &R) {
7615   if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R))
7616     return true;
7617 
7618   bool OpsChanged = false;
7619   for (int Idx = 0; Idx < 2; ++Idx) {
7620     OpsChanged |=
7621         vectorizeRootInstruction(nullptr, CI->getOperand(Idx), BB, R, TTI);
7622   }
7623   return OpsChanged;
7624 }
7625 
7626 bool SLPVectorizerPass::vectorizeSimpleInstructions(
7627     SmallVectorImpl<Instruction *> &Instructions, BasicBlock *BB, BoUpSLP &R) {
7628   bool OpsChanged = false;
7629   for (auto *I : reverse(Instructions)) {
7630     if (R.isDeleted(I))
7631       continue;
7632     if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I))
7633       OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
7634     else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I))
7635       OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
7636     else if (auto *CI = dyn_cast<CmpInst>(I))
7637       OpsChanged |= vectorizeCmpInst(CI, BB, R);
7638   }
7639   Instructions.clear();
7640   return OpsChanged;
7641 }
7642 
7643 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
7644   bool Changed = false;
7645   SmallVector<Value *, 4> Incoming;
7646   SmallPtrSet<Value *, 16> VisitedInstrs;
7647 
7648   bool HaveVectorizedPhiNodes = true;
7649   while (HaveVectorizedPhiNodes) {
7650     HaveVectorizedPhiNodes = false;
7651 
7652     // Collect the incoming values from the PHIs.
7653     Incoming.clear();
7654     for (Instruction &I : *BB) {
7655       PHINode *P = dyn_cast<PHINode>(&I);
7656       if (!P)
7657         break;
7658 
7659       if (!VisitedInstrs.count(P) && !R.isDeleted(P))
7660         Incoming.push_back(P);
7661     }
7662 
7663     // Sort by type.
7664     llvm::stable_sort(Incoming, PhiTypeSorterFunc);
7665 
7666     // Try to vectorize elements base on their type.
7667     for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
7668                                            E = Incoming.end();
7669          IncIt != E;) {
7670 
7671       // Look for the next elements with the same type.
7672       SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
7673       while (SameTypeIt != E &&
7674              (*SameTypeIt)->getType() == (*IncIt)->getType()) {
7675         VisitedInstrs.insert(*SameTypeIt);
7676         ++SameTypeIt;
7677       }
7678 
7679       // Try to vectorize them.
7680       unsigned NumElts = (SameTypeIt - IncIt);
7681       LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs ("
7682                         << NumElts << ")\n");
7683       // The order in which the phi nodes appear in the program does not matter.
7684       // So allow tryToVectorizeList to reorder them if it is beneficial. This
7685       // is done when there are exactly two elements since tryToVectorizeList
7686       // asserts that there are only two values when AllowReorder is true.
7687       bool AllowReorder = NumElts == 2;
7688       if (NumElts > 1 &&
7689           tryToVectorizeList(makeArrayRef(IncIt, NumElts), R, AllowReorder)) {
7690         // Success start over because instructions might have been changed.
7691         HaveVectorizedPhiNodes = true;
7692         Changed = true;
7693         break;
7694       }
7695 
7696       // Start over at the next instruction of a different type (or the end).
7697       IncIt = SameTypeIt;
7698     }
7699   }
7700 
7701   VisitedInstrs.clear();
7702 
7703   SmallVector<Instruction *, 8> PostProcessInstructions;
7704   SmallDenseSet<Instruction *, 4> KeyNodes;
7705   for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
7706     // Skip instructions with scalable type. The num of elements is unknown at
7707     // compile-time for scalable type.
7708     if (isa<ScalableVectorType>(it->getType()))
7709       continue;
7710 
7711     // Skip instructions marked for the deletion.
7712     if (R.isDeleted(&*it))
7713       continue;
7714     // We may go through BB multiple times so skip the one we have checked.
7715     if (!VisitedInstrs.insert(&*it).second) {
7716       if (it->use_empty() && KeyNodes.count(&*it) > 0 &&
7717           vectorizeSimpleInstructions(PostProcessInstructions, BB, R)) {
7718         // We would like to start over since some instructions are deleted
7719         // and the iterator may become invalid value.
7720         Changed = true;
7721         it = BB->begin();
7722         e = BB->end();
7723       }
7724       continue;
7725     }
7726 
7727     if (isa<DbgInfoIntrinsic>(it))
7728       continue;
7729 
7730     // Try to vectorize reductions that use PHINodes.
7731     if (PHINode *P = dyn_cast<PHINode>(it)) {
7732       // Check that the PHI is a reduction PHI.
7733       if (P->getNumIncomingValues() == 2) {
7734         // Try to match and vectorize a horizontal reduction.
7735         if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
7736                                      TTI)) {
7737           Changed = true;
7738           it = BB->begin();
7739           e = BB->end();
7740           continue;
7741         }
7742       }
7743       // Try to vectorize the incoming values of the PHI, to catch reductions
7744       // that feed into PHIs.
7745       for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) {
7746         // Skip if the incoming block is the current BB for now. Also, bypass
7747         // unreachable IR for efficiency and to avoid crashing.
7748         // TODO: Collect the skipped incoming values and try to vectorize them
7749         // after processing BB.
7750         if (BB == P->getIncomingBlock(I) ||
7751             !DT->isReachableFromEntry(P->getIncomingBlock(I)))
7752           continue;
7753 
7754         Changed |= vectorizeRootInstruction(nullptr, P->getIncomingValue(I),
7755                                             P->getIncomingBlock(I), R, TTI);
7756       }
7757       continue;
7758     }
7759 
7760     // Ran into an instruction without users, like terminator, or function call
7761     // with ignored return value, store. Ignore unused instructions (basing on
7762     // instruction type, except for CallInst and InvokeInst).
7763     if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
7764                             isa<InvokeInst>(it))) {
7765       KeyNodes.insert(&*it);
7766       bool OpsChanged = false;
7767       if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
7768         for (auto *V : it->operand_values()) {
7769           // Try to match and vectorize a horizontal reduction.
7770           OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
7771         }
7772       }
7773       // Start vectorization of post-process list of instructions from the
7774       // top-tree instructions to try to vectorize as many instructions as
7775       // possible.
7776       OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R);
7777       if (OpsChanged) {
7778         // We would like to start over since some instructions are deleted
7779         // and the iterator may become invalid value.
7780         Changed = true;
7781         it = BB->begin();
7782         e = BB->end();
7783         continue;
7784       }
7785     }
7786 
7787     if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
7788         isa<InsertValueInst>(it))
7789       PostProcessInstructions.push_back(&*it);
7790   }
7791 
7792   return Changed;
7793 }
7794 
7795 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
7796   auto Changed = false;
7797   for (auto &Entry : GEPs) {
7798     // If the getelementptr list has fewer than two elements, there's nothing
7799     // to do.
7800     if (Entry.second.size() < 2)
7801       continue;
7802 
7803     LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
7804                       << Entry.second.size() << ".\n");
7805 
7806     // Process the GEP list in chunks suitable for the target's supported
7807     // vector size. If a vector register can't hold 1 element, we are done. We
7808     // are trying to vectorize the index computations, so the maximum number of
7809     // elements is based on the size of the index expression, rather than the
7810     // size of the GEP itself (the target's pointer size).
7811     unsigned MaxVecRegSize = R.getMaxVecRegSize();
7812     unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin());
7813     if (MaxVecRegSize < EltSize)
7814       continue;
7815 
7816     unsigned MaxElts = MaxVecRegSize / EltSize;
7817     for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
7818       auto Len = std::min<unsigned>(BE - BI, MaxElts);
7819       ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);
7820 
7821       // Initialize a set a candidate getelementptrs. Note that we use a
7822       // SetVector here to preserve program order. If the index computations
7823       // are vectorizable and begin with loads, we want to minimize the chance
7824       // of having to reorder them later.
7825       SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());
7826 
7827       // Some of the candidates may have already been vectorized after we
7828       // initially collected them. If so, they are marked as deleted, so remove
7829       // them from the set of candidates.
7830       Candidates.remove_if(
7831           [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); });
7832 
7833       // Remove from the set of candidates all pairs of getelementptrs with
7834       // constant differences. Such getelementptrs are likely not good
7835       // candidates for vectorization in a bottom-up phase since one can be
7836       // computed from the other. We also ensure all candidate getelementptr
7837       // indices are unique.
7838       for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
7839         auto *GEPI = GEPList[I];
7840         if (!Candidates.count(GEPI))
7841           continue;
7842         auto *SCEVI = SE->getSCEV(GEPList[I]);
7843         for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
7844           auto *GEPJ = GEPList[J];
7845           auto *SCEVJ = SE->getSCEV(GEPList[J]);
7846           if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
7847             Candidates.remove(GEPI);
7848             Candidates.remove(GEPJ);
7849           } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
7850             Candidates.remove(GEPJ);
7851           }
7852         }
7853       }
7854 
7855       // We break out of the above computation as soon as we know there are
7856       // fewer than two candidates remaining.
7857       if (Candidates.size() < 2)
7858         continue;
7859 
7860       // Add the single, non-constant index of each candidate to the bundle. We
7861       // ensured the indices met these constraints when we originally collected
7862       // the getelementptrs.
7863       SmallVector<Value *, 16> Bundle(Candidates.size());
7864       auto BundleIndex = 0u;
7865       for (auto *V : Candidates) {
7866         auto *GEP = cast<GetElementPtrInst>(V);
7867         auto *GEPIdx = GEP->idx_begin()->get();
7868         assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
7869         Bundle[BundleIndex++] = GEPIdx;
7870       }
7871 
7872       // Try and vectorize the indices. We are currently only interested in
7873       // gather-like cases of the form:
7874       //
7875       // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
7876       //
7877       // where the loads of "a", the loads of "b", and the subtractions can be
7878       // performed in parallel. It's likely that detecting this pattern in a
7879       // bottom-up phase will be simpler and less costly than building a
7880       // full-blown top-down phase beginning at the consecutive loads.
7881       Changed |= tryToVectorizeList(Bundle, R);
7882     }
7883   }
7884   return Changed;
7885 }
7886 
7887 bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
7888   bool Changed = false;
7889   // Attempt to sort and vectorize each of the store-groups.
7890   for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e;
7891        ++it) {
7892     if (it->second.size() < 2)
7893       continue;
7894 
7895     LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
7896                       << it->second.size() << ".\n");
7897 
7898     Changed |= vectorizeStores(it->second, R);
7899   }
7900   return Changed;
7901 }
7902 
7903 char SLPVectorizer::ID = 0;
7904 
7905 static const char lv_name[] = "SLP Vectorizer";
7906 
7907 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
7908 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7909 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7910 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7911 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7912 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
7913 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7914 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7915 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7916 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)
7917 
7918 Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); }
7919