//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
                                  cl::desc("Run the SLP vectorization passes"));

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number"));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned>
MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden,
    cl::desc("Maximum SLP vectorization factor (0=unlimited)"));

static cl::opt<int>
MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden,
    cl::desc("Maximum depth of the lookup for consecutive stores."));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
    cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

// The maximum depth that the look-ahead score heuristic will explore.
// The higher this value, the higher the compilation time overhead.
static cl::opt<int> LookAheadMaxDepth(
    "slp-max-look-ahead-depth", cl::init(2), cl::Hidden,
    cl::desc("The maximum look-ahead depth for operand reordering scores"));

// The look-ahead heuristic goes through the users of the bundle to calculate
// the users' cost in getExternalUsesCost(). To avoid a compilation time
// increase, we limit the number of users visited to this value.
static cl::opt<unsigned> LookAheadUsersBudget(
    "slp-look-ahead-users-budget", cl::init(2), cl::Hidden,
    cl::desc("The maximum number of users to visit while visiting the "
             "predecessors. This prevents compilation time increase."));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important things to filter here are types which are invalid in
/// LLVM vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_fp128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  BasicBlock *BB = I0->getParent();
  for (int I = 1, E = VL.size(); I < E; I++) {
    auto *II = dyn_cast<Instruction>(VL[I]);
    if (!II)
      return false;

    if (BB != II->getParent())
      return false;
  }
  return true;
}

/// \returns True if the value is a constant (but not globals/constant
/// expressions).
static bool isConstant(Value *V) {
  return isa<Constant>(V) && !isa<ConstantExpr>(V) && !isa<GlobalValue>(V);
}

/// \returns True if all of the values in \p VL are constants (but not
/// globals/constant expressions).
static bool allConstant(ArrayRef<Value *> VL) {
  // Constant expressions and globals can't be vectorized like normal
  // integer/FP constants.
  return all_of(VL, isConstant);
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns True if \p I is commutative, handles CmpInst and BinaryOperator.
static bool isCommutative(Instruction *I) {
  if (auto *Cmp = dyn_cast<CmpInst>(I))
    return Cmp->isCommutative();
  if (auto *BO = dyn_cast<BinaryOperator>(I))
    return BO->isCommutative();
  // TODO: This should check for generic Instruction::isCommutative(), but
  //       we need to confirm that the caller code correctly handles Intrinsics
  //       for example (does not have 2 operands).
  return false;
}

/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> poison, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> poison, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul.
/// Mask will return the Shuffle Mask equivalent to the extracted elements.
/// TODO: Can we split off and reuse the shuffle mask detection from
/// TargetTransformInfo::getInstructionThroughput?
static Optional<TargetTransformInfo::ShuffleKind>
isShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
  auto *EI0 = cast<ExtractElementInst>(VL[0]);
  unsigned Size =
      cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, Select, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    auto *EI = cast<ExtractElementInst>(VL[I]);
    auto *Vec = EI->getVectorOperand();
    // All vector operands must have the same number of vector elements.
    if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
      return None;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size)) {
      Mask.push_back(UndefMaskElem);
      continue;
    }
    unsigned IntIdx = Idx->getValue().getZExtValue();
    Mask.push_back(IntIdx);
    // We can extractelement from an undef or poison vector.
    if (isa<UndefValue>(Vec))
      continue;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
    if (!Vec1 || Vec1 == Vec)
      Vec1 = Vec;
    else if (!Vec2 || Vec2 == Vec)
      Vec2 = Vec;
    else
      return None;
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector, otherwise
  // we have a permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}
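
// A worked example for isShuffle() (illustrative, not part of the original
// source): for the IR in the doc-comment above, with
// VL = {%x0, %x3, %y1, %y2}, the routine fills Mask = {0, 3, 1, 2} and
// returns SK_PermuteTwoSrc, because lanes 1-3 extract from indexes that do not
// match their lane number and two distinct source vectors (%x and %y) are
// involved.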

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
  bool isAltShuffle() const { return getOpcode() != getAltOpcode(); }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace

/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}

/// \returns true if \p Opcode is allowed as part of the main/alternate
/// instruction for SLP vectorization.
///
/// Example of an unsupported opcode is SDIV, which can potentially cause UB if
/// the "shuffled out" lane would result in division by zero.
static bool isValidForAlternation(unsigned Opcode) {
  if (Instruction::isIntDivRem(Opcode))
    return false;

  return true;
}

/// \returns analysis of the Instructions in \p VL described in
/// InstructionsState, i.e. the opcode with which we suppose the whole list
/// could be vectorized even if its structure is diverse.
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex = 0) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
  // TODO - generalize to support all operators (types, calls etc.).
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
    if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
      if (InstOpcode == Opcode || InstOpcode == AltOpcode)
        continue;
      if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
          isValidForAlternation(Opcode)) {
        AltOpcode = InstOpcode;
        AltIndex = Cnt;
        continue;
      }
    } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
      Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
      Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        if (InstOpcode == Opcode || InstOpcode == AltOpcode)
          continue;
        if (Opcode == AltOpcode) {
          assert(isValidForAlternation(Opcode) &&
                 isValidForAlternation(InstOpcode) &&
                 "Cast isn't safe for alternation, logic needs to be updated!");
          AltOpcode = InstOpcode;
          AltIndex = Cnt;
          continue;
        }
      }
    } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
      continue;
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }

  return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
                           cast<Instruction>(VL[AltIndex]));
}
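
// A worked example for getSameOpcode() (illustrative): for
// VL = {add, sub, add, sub} the returned InstructionsState has MainOp = the
// first add, AltOp = the first sub, and isAltShuffle() is true. For
// VL = {add, sub, mul} a third distinct opcode cannot be represented by a
// main/alternate pair, so MainOp/AltOp are null and getOpcode() returns 0.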

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static Optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return None;
    return CI->getZExtValue();
  }
  ExtractValueInst *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return None;
  return *EI->idx_begin();
}
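
// For illustration (hypothetical IR): for "extractelement <4 x i32> %v, i32 2"
// getExtractIndex() returns 2; for "extractvalue {i32, i32} %agg, 1" it
// returns 1; a non-constant index or a multi-index extractvalue yields None.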

/// \returns True if an in-tree use also needs an extract. This refers to a
/// possible scalar operand in a vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
      if (hasVectorInstrinsicScalarOpd(ID, i))
        return (CI->getArgOperand(i) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AAResults *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

namespace llvm {

static void inversePermutation(ArrayRef<unsigned> Indices,
                               SmallVectorImpl<int> &Mask) {
  Mask.clear();
  const unsigned E = Indices.size();
  Mask.resize(E, E + 1);
  for (unsigned I = 0; I < E; ++I)
    Mask[Indices[I]] = I;
}
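
// A worked example for inversePermutation() (illustrative): Indices = {2, 0, 1}
// yields Mask = {1, 2, 0}, since Mask[Indices[I]] = I for every I: the element
// placed at position 2 came from position 0, and so on.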

/// \returns the inserting index of an InsertElement or InsertValue instruction,
/// using \p Offset as the base offset for the index.
static Optional<int> getInsertIndex(Value *InsertInst, unsigned Offset) {
  int Index = Offset;
  if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
    if (auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2))) {
      auto *VT = cast<FixedVectorType>(IE->getType());
      if (CI->getValue().uge(VT->getNumElements()))
        return UndefMaskElem;
      Index *= VT->getNumElements();
      Index += CI->getZExtValue();
      return Index;
    }
    if (isa<UndefValue>(IE->getOperand(2)))
      return UndefMaskElem;
    return None;
  }

  auto *IV = cast<InsertValueInst>(InsertInst);
  Type *CurrentType = IV->getType();
  for (unsigned I : IV->indices()) {
    if (auto *ST = dyn_cast<StructType>(CurrentType)) {
      Index *= ST->getNumElements();
      CurrentType = ST->getElementType(I);
    } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
      Index *= AT->getNumElements();
      CurrentType = AT->getElementType();
    } else {
      return None;
    }
    Index += I;
  }
  return Index;
}
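
// For illustration (hypothetical IR, Offset = 0): for
// "insertelement <4 x float> %v, float %f, i32 3" getInsertIndex() returns 3.
// For "insertvalue [2 x [2 x i16]] %agg, i16 %x, 1, 0" the index is linearized
// structurally: (0 * 2 + 1) * 2 + 0 = 2.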

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  using OrdersType = SmallVector<unsigned, 4>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC),
        DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    //       data type rather than just register size. For example, x86 AVX has
    //       256-bit registers, but it does not support integer operations
    //       at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize =
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize();

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree, taking into account the list of externally used
  /// values \p ExternallyUsedValues. Values in this MapVector can be replaced
  /// by the generated extractvalue instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  InstructionCost getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst, taking
  /// into account (and updating, if required) the list of externally used
  /// values stored in \p ExternallyUsedValues.
  void buildTree(ArrayRef<Value *> Roots,
                 ExtraValueToDebugLocsMap &ExternallyUsedValues,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumOpsWantToKeepOrder.clear();
    NumOpsWantToKeepOriginalOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
    InstrElementSize.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// \returns The best order of instructions for vectorization.
  Optional<ArrayRef<unsigned>> bestOrder() const {
    assert(llvm::all_of(
               NumOpsWantToKeepOrder,
               [this](const decltype(NumOpsWantToKeepOrder)::value_type &D) {
                 return D.getFirst().size() ==
                        VectorizableTree[0]->Scalars.size();
               }) &&
           "All orders must have the same size as number of instructions in "
           "tree node.");
    auto I = std::max_element(
        NumOpsWantToKeepOrder.begin(), NumOpsWantToKeepOrder.end(),
        [](const decltype(NumOpsWantToKeepOrder)::value_type &D1,
           const decltype(NumOpsWantToKeepOrder)::value_type &D2) {
          return D1.second < D2.second;
        });
    if (I == NumOpsWantToKeepOrder.end() ||
        I->getSecond() <= NumOpsWantToKeepOriginalOrder)
      return None;

    return makeArrayRef(I->getFirst());
  }

  /// Builds the correct order for root instructions.
  /// If some leaves have the same instructions to be vectorized, we may
  /// incorrectly evaluate the best order for the root node (it is built for the
  /// vector of instructions without repeated instructions and, thus, has fewer
  /// elements than the root node). This function builds the correct order for
  /// the root node.
  /// For example, if the root node is \<a+b, a+c, a+d, f+e\>, then the leaves
  /// are \<a, a, a, f\> and \<b, c, d, e\>. When we try to vectorize the first
  /// leaf, it will be shrunk to \<a, f\>. If instructions in this leaf should
  /// be reordered, the best order will be \<1, 0\>. We need to extend this
  /// order for the root node. For the root node this order should look like
  /// \<3, 0, 1, 2\>. This function extends the order for the reused
  /// instructions.
  void findRootOrder(OrdersType &Order) {
    // If the leaf has the same number of instructions to vectorize as the root
    // - the order must already be set.
    unsigned RootSize = VectorizableTree[0]->Scalars.size();
    if (Order.size() == RootSize)
      return;
    SmallVector<unsigned, 4> RealOrder(Order.size());
    std::swap(Order, RealOrder);
    SmallVector<int, 4> Mask;
    inversePermutation(RealOrder, Mask);
    Order.assign(Mask.begin(), Mask.end());
    // The leaf has fewer instructions than the root - we need to find the true
    // order of the root.
    // Scan the nodes starting from the leaf back to the root.
    const TreeEntry *PNode = VectorizableTree.back().get();
    SmallVector<const TreeEntry *, 4> Nodes(1, PNode);
    SmallPtrSet<const TreeEntry *, 4> Visited;
    while (!Nodes.empty() && Order.size() != RootSize) {
      const TreeEntry *PNode = Nodes.pop_back_val();
      if (!Visited.insert(PNode).second)
        continue;
      const TreeEntry &Node = *PNode;
      for (const EdgeInfo &EI : Node.UserTreeIndices)
        if (EI.UserTE)
          Nodes.push_back(EI.UserTE);
      if (Node.ReuseShuffleIndices.empty())
        continue;
      // Build the order for the parent node.
      OrdersType NewOrder(Node.ReuseShuffleIndices.size(), RootSize);
      SmallVector<unsigned, 4> OrderCounter(Order.size(), 0);
      // The algorithm of the order extension is:
      // 1. Calculate the number of the same instructions for the order.
      // 2. Calculate the index of the new order: total number of instructions
      // with order less than the order of the current instruction + reuse
      // number of the current instruction.
      // 3. The new order is just the index of the instruction in the original
      // vector of the instructions.
      for (unsigned I : Node.ReuseShuffleIndices)
        ++OrderCounter[Order[I]];
      SmallVector<unsigned, 4> CurrentCounter(Order.size(), 0);
      for (unsigned I = 0, E = Node.ReuseShuffleIndices.size(); I < E; ++I) {
        unsigned ReusedIdx = Node.ReuseShuffleIndices[I];
        unsigned OrderIdx = Order[ReusedIdx];
        unsigned NewIdx = 0;
        for (unsigned J = 0; J < OrderIdx; ++J)
          NewIdx += OrderCounter[J];
        NewIdx += CurrentCounter[OrderIdx];
        ++CurrentCounter[OrderIdx];
        assert(NewOrder[NewIdx] == RootSize &&
               "The order index should not be written already.");
        NewOrder[NewIdx] = I;
      }
      std::swap(Order, NewOrder);
    }
    assert(Order.size() == RootSize &&
           "Root node is expected or the size of the order must be the same as "
           "the number of elements in the root node.");
    assert(llvm::all_of(Order,
                        [RootSize](unsigned Val) { return Val != RootSize; }) &&
           "All indices must be initialized");
  }
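
  // Tracing the doc-comment example (illustrative, assuming the leaf order is
  // <1, 0> and its ReuseShuffleIndices are <0, 0, 0, 1> for a root of size 4):
  // the counting pass yields OrderCounter = {1, 3}, and the extension loop
  // then produces NewOrder = <3, 0, 1, 2> - the reuse of the second leaf
  // element comes first, followed by the three reuses of the first element in
  // their original relative order.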

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
    unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
      MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
    return MaxVF ? MaxVF : UINT_MAX;
  }

  /// Check if homogeneous aggregate is isomorphic to some VectorType.
  /// Accepts homogeneous multidimensional aggregate of scalars/vectors like
  /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
  /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable() const;

  /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  ///       may not be necessary.
  bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;

  /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  ///       may not be necessary.
  bool isLoadCombineCandidate() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }

  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
  };

  /// A helper data structure to hold the operands of a vector of instructions.
  /// This supports a fixed vector length for all operand vectors.
  class VLOperands {
    /// For each operand we need (i) the value, and (ii) the opcode that it
    /// would be attached to if the expression was in a left-linearized form.
    /// This is required to avoid illegal operand reordering.
    /// For example:
    /// \verbatim
    ///                         0 Op1
    ///                         |/
    /// Op1 Op2   Linearized    + Op2
    ///   \ /     ---------->   |/
    ///    -                    -
    ///
    /// Op1 - Op2            (0 + Op1) - Op2
    /// \endverbatim
    ///
    /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
    ///
    /// Another way to think of this is to track all the operations across the
    /// path from the operand all the way to the root of the tree and to
    /// calculate the operation that corresponds to this path. For example, the
    /// path from Op2 to the root crosses the RHS of the '-', therefore the
    /// corresponding operation is a '-' (which matches the one in the
    /// linearized tree, as shown above).
    ///
    /// For lack of a better term, we refer to this operation as Accumulated
    /// Path Operation (APO).
    struct OperandData {
      OperandData() = default;
      OperandData(Value *V, bool APO, bool IsUsed)
          : V(V), APO(APO), IsUsed(IsUsed) {}
      /// The operand value.
      Value *V = nullptr;
      /// TreeEntries only allow a single opcode, or an alternate sequence of
      /// them (e.g, +, -). Therefore, we can safely use a boolean value for the
      /// APO. It is set to 'true' if 'V' is attached to an inverse operation
      /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise
      /// (e.g., Add/Mul).
      bool APO = false;
      /// Helper data for the reordering function.
      bool IsUsed = false;
    };

    /// During operand reordering, we are trying to select the operand at each
    /// lane that matches best with the operand at the neighboring lane. Our
    /// selection is based on the type of value we are looking for. For example,
    /// if the neighboring lane has a load, we need to look for a load that is
    /// accessing a consecutive address. These strategies are summarized in the
    /// 'ReorderingMode' enumerator.
    enum class ReorderingMode {
      Load,     ///< Matching loads to consecutive memory addresses
      Opcode,   ///< Matching instructions based on opcode (same or alternate)
      Constant, ///< Matching constants
      Splat,    ///< Matching the same instruction multiple times (broadcast)
      Failed,   ///< We failed to create a vectorizable group
    };

    using OperandDataVec = SmallVector<OperandData, 2>;

    /// A vector of operand vectors.
    SmallVector<OperandDataVec, 4> OpsVec;

    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;

    /// \returns the operand data at \p OpIdx and \p Lane.
    OperandData &getData(unsigned OpIdx, unsigned Lane) {
      return OpsVec[OpIdx][Lane];
    }

    /// \returns the operand data at \p OpIdx and \p Lane. Const version.
    const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
      return OpsVec[OpIdx][Lane];
    }

    /// Clears the used flag for all entries.
    void clearUsed() {
      for (unsigned OpIdx = 0, NumOperands = getNumOperands();
           OpIdx != NumOperands; ++OpIdx)
        for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
             ++Lane)
          OpsVec[OpIdx][Lane].IsUsed = false;
    }

    /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
    void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
      std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
    }

    // The hard-coded scores listed here are not very important. When computing
    // the scores of matching one sub-tree with another, we are basically
    // counting the number of values that are matching. So even if all scores
    // are set to 1, we would still get a decent matching result.
    // However, sometimes we have to break ties. For example we may have to
    // choose between matching loads vs matching opcodes. This is what these
    // scores are helping us with: they provide the order of preference.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 3;
    /// ExtractElementInst from same vector and consecutive indexes.
    static const int ScoreConsecutiveExtracts = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g, add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// A user external to the vectorized code.
    static const int ExternalUseCost = 1;
    /// The user is internal but in a different lane.
    static const int UserInDiffLaneCost = ExternalUseCost;

    /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
    static int getShallowScore(Value *V1, Value *V2, const DataLayout &DL,
                               ScalarEvolution &SE) {
      auto *LI1 = dyn_cast<LoadInst>(V1);
      auto *LI2 = dyn_cast<LoadInst>(V2);
      if (LI1 && LI2) {
        if (LI1->getParent() != LI2->getParent())
          return VLOperands::ScoreFail;

        Optional<int> Dist = getPointersDiff(
            LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
            LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
        return (Dist && *Dist == 1) ? VLOperands::ScoreConsecutiveLoads
                                    : VLOperands::ScoreFail;
      }

      auto *C1 = dyn_cast<Constant>(V1);
      auto *C2 = dyn_cast<Constant>(V2);
      if (C1 && C2)
        return VLOperands::ScoreConstants;

      // Extracts from consecutive indexes of the same vector score better, as
      // the extracts could be optimized away.
      Value *EV;
      ConstantInt *Ex1Idx, *Ex2Idx;
      if (match(V1, m_ExtractElt(m_Value(EV), m_ConstantInt(Ex1Idx))) &&
          match(V2, m_ExtractElt(m_Deferred(EV), m_ConstantInt(Ex2Idx))) &&
          Ex1Idx->getZExtValue() + 1 == Ex2Idx->getZExtValue())
        return VLOperands::ScoreConsecutiveExtracts;

      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (I1 && I2) {
        if (I1 == I2)
          return VLOperands::ScoreSplat;
        InstructionsState S = getSameOpcode({I1, I2});
        // Note: Only consider instructions with <= 2 operands to avoid
        // complexity explosion.
        if (S.getOpcode() && S.MainOp->getNumOperands() <= 2)
          return S.isAltShuffle() ? VLOperands::ScoreAltOpcodes
                                  : VLOperands::ScoreSameOpcode;
      }

      if (isa<UndefValue>(V2))
        return VLOperands::ScoreUndef;

      return VLOperands::ScoreFail;
    }
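
    // A few illustrative scores (using the constants defined above):
    // getShallowScore(load A[i], load A[i+1]) is ScoreConsecutiveLoads (3),
    // getShallowScore(constant, constant) is ScoreConstants (2),
    // getShallowScore(add, sub) is ScoreAltOpcodes (1), and
    // getShallowScore(load A[i], load A[i+2]) is ScoreFail (0) because the
    // pointers differ by more than one element.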

    /// Holds the values and their lanes that are taking part in the look-ahead
    /// score calculation. This is used in the external uses cost calculation.
    SmallDenseMap<Value *, int> InLookAheadValues;

    /// \Returns the additional cost due to uses of \p LHS and \p RHS that are
    /// either external to the vectorized code, or require shuffling.
    int getExternalUsesCost(const std::pair<Value *, int> &LHS,
                            const std::pair<Value *, int> &RHS) {
      int Cost = 0;
      std::array<std::pair<Value *, int>, 2> Values = {{LHS, RHS}};
      for (int Idx = 0, IdxE = Values.size(); Idx != IdxE; ++Idx) {
        Value *V = Values[Idx].first;
        if (isa<Constant>(V)) {
          // Since this is a function pass, it doesn't make semantic sense to
          // walk the users of a subclass of Constant. The users could be in
          // another function, or even another module that happens to be in
          // the same LLVMContext.
          continue;
        }

        // Calculate the absolute lane, using the minimum relative lane of LHS
        // and RHS as base and Idx as the offset.
        int Ln = std::min(LHS.second, RHS.second) + Idx;
        assert(Ln >= 0 && "Bad lane calculation");
        unsigned UsersBudget = LookAheadUsersBudget;
        for (User *U : V->users()) {
          if (const TreeEntry *UserTE = R.getTreeEntry(U)) {
            // The user is in the VectorizableTree. Check if we need to insert.
            auto It = llvm::find(UserTE->Scalars, U);
            assert(It != UserTE->Scalars.end() && "U is in UserTE");
            int UserLn = std::distance(UserTE->Scalars.begin(), It);
            assert(UserLn >= 0 && "Bad lane");
            if (UserLn != Ln)
              Cost += UserInDiffLaneCost;
          } else {
            // Check if the user is in the look-ahead code.
            auto It2 = InLookAheadValues.find(U);
            if (It2 != InLookAheadValues.end()) {
              // The user is in the look-ahead code. Check the lane.
              if (It2->second != Ln)
                Cost += UserInDiffLaneCost;
            } else {
              // The user is neither in SLP tree nor in the look-ahead code.
              Cost += ExternalUseCost;
            }
          }
          // Limit the number of visited uses to cap compilation time.
          if (--UsersBudget == 0)
            break;
        }
      }
      return Cost;
    }

    /// Go through the operands of \p LHS and \p RHS recursively until \p
    /// MaxLevel, and return the cumulative score. For example:
    /// \verbatim
    ///  A[0]  B[0]  A[1]  B[1]  C[0] D[0]  B[1] A[1]
    ///     \ /         \ /         \ /        \ /
    ///      +           +           +          +
    ///     G1          G2          G3         G4
    /// \endverbatim
    /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
    /// each level recursively, accumulating the score. It starts from matching
    /// the additions at level 0, then moves on to the loads (level 1). The
    /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
    /// {B[0],B[1]} match with VLOperands::ScoreConsecutiveLoads, while
    /// {A[0],C[0]} has a score of VLOperands::ScoreFail.
    /// Please note that the order of the operands does not matter, as we
    /// evaluate the score of all profitable combinations of operands. In
    /// other words the score of G1 and G4 is the same as G1 and G2. This
    /// heuristic is based on ideas described in:
    ///   Look-ahead SLP: Auto-vectorization in the presence of commutative
    ///   operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
    ///   Luís F. W. Góes
    int getScoreAtLevelRec(const std::pair<Value *, int> &LHS,
                           const std::pair<Value *, int> &RHS, int CurrLevel,
                           int MaxLevel) {

      Value *V1 = LHS.first;
      Value *V2 = RHS.first;
      // Get the shallow score of V1 and V2.
      int ShallowScoreAtThisLevel =
          std::max((int)ScoreFail, getShallowScore(V1, V2, DL, SE) -
                                       getExternalUsesCost(LHS, RHS));
      int Lane1 = LHS.second;
      int Lane2 = RHS.second;

      // If reached MaxLevel,
      //  or if V1 and V2 are not instructions,
      //  or if they are SPLAT,
      //  or if they are not consecutive, early return the current cost.
      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
          ShallowScoreAtThisLevel == VLOperands::ScoreFail ||
          (isa<LoadInst>(I1) && isa<LoadInst>(I2) && ShallowScoreAtThisLevel))
        return ShallowScoreAtThisLevel;
      assert(I1 && I2 && "Should have early exited.");

      // Keep track of in-tree values for determining the external-use cost.
      InLookAheadValues[V1] = Lane1;
      InLookAheadValues[V2] = Lane2;

      // Contains the I2 operand indexes that got matched with I1 operands.
      SmallSet<unsigned, 4> Op2Used;

      // Recursion towards the operands of I1 and I2. We are trying all possible
      // operand pairs, and keeping track of the best score.
      for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
           OpIdx1 != NumOperands1; ++OpIdx1) {
        // Try to pair the operand at OpIdx1 with the best operand of I2.
        int MaxTmpScore = 0;
        unsigned MaxOpIdx2 = 0;
        bool FoundBest = false;
        // If I2 is commutative try all combinations.
        unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
        unsigned ToIdx = isCommutative(I2)
                             ? I2->getNumOperands()
                             : std::min(I2->getNumOperands(), OpIdx1 + 1);
        assert(FromIdx <= ToIdx && "Bad index");
        for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
          // Skip operands already paired with OpIdx1.
          if (Op2Used.count(OpIdx2))
            continue;
          // Recursively calculate the cost at each level.
          int TmpScore = getScoreAtLevelRec({I1->getOperand(OpIdx1), Lane1},
                                            {I2->getOperand(OpIdx2), Lane2},
                                            CurrLevel + 1, MaxLevel);
          // Look for the best score.
          if (TmpScore > VLOperands::ScoreFail && TmpScore > MaxTmpScore) {
            MaxTmpScore = TmpScore;
            MaxOpIdx2 = OpIdx2;
            FoundBest = true;
          }
        }
        if (FoundBest) {
          // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
          Op2Used.insert(MaxOpIdx2);
          ShallowScoreAtThisLevel += MaxTmpScore;
        }
      }
      return ShallowScoreAtThisLevel;
    }

    /// \Returns the look-ahead score, which tells us how much the sub-trees
    /// rooted at \p LHS and \p RHS match; the more they match, the higher the
    /// score. This helps break ties in an informed way when we cannot decide on
    /// the order of the operands by just considering the immediate
    /// predecessors.
    int getLookAheadScore(const std::pair<Value *, int> &LHS,
                          const std::pair<Value *, int> &RHS) {
      InLookAheadValues.clear();
      return getScoreAtLevelRec(LHS, RHS, 1, LookAheadMaxDepth);
    }

    // Search all operands in Ops[*][Lane] for the one that matches best
    // Ops[OpIdx][LastLane] and return its operand index.
    // If no good match can be found, return None.
    Optional<unsigned>
    getBestOperand(unsigned OpIdx, int Lane, int LastLane,
                   ArrayRef<ReorderingMode> ReorderingModes) {
      unsigned NumOperands = getNumOperands();

      // The operand of the previous lane at OpIdx.
      Value *OpLastLane = getData(OpIdx, LastLane).V;

      // Our strategy mode for OpIdx.
      ReorderingMode RMode = ReorderingModes[OpIdx];

      // The linearized opcode of the operand at OpIdx, Lane.
      bool OpIdxAPO = getData(OpIdx, Lane).APO;

      // The best operand index and its score.
      // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
      // are using the score to differentiate between the two.
      struct BestOpData {
        Optional<unsigned> Idx = None;
        unsigned Score = 0;
      } BestOp;

      // Iterate through all unused operands and look for the best.
      for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
        // Get the operand at Idx and Lane.
        OperandData &OpData = getData(Idx, Lane);
        Value *Op = OpData.V;
        bool OpAPO = OpData.APO;

        // Skip already selected operands.
        if (OpData.IsUsed)
          continue;

        // Skip if we are trying to move the operand to a position with a
        // different opcode in the linearized tree form. This would break the
        // semantics.
        if (OpAPO != OpIdxAPO)
          continue;

        // Look for an operand that matches the current mode.
        switch (RMode) {
        case ReorderingMode::Load:
        case ReorderingMode::Constant:
        case ReorderingMode::Opcode: {
          bool LeftToRight = Lane > LastLane;
          Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
          Value *OpRight = (LeftToRight) ? Op : OpLastLane;
          unsigned Score =
              getLookAheadScore({OpLeft, LastLane}, {OpRight, Lane});
          if (Score > BestOp.Score) {
            BestOp.Idx = Idx;
            BestOp.Score = Score;
          }
          break;
        }
        case ReorderingMode::Splat:
          if (Op == OpLastLane)
            BestOp.Idx = Idx;
          break;
        case ReorderingMode::Failed:
          return None;
        }
      }

      if (BestOp.Idx) {
        getData(BestOp.Idx.getValue(), Lane).IsUsed = true;
        return BestOp.Idx;
      }
      // If we could not find a good match return None.
      return None;
    }
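
    // An illustrative trace (hypothetical operands): if Ops[OpIdx][LastLane]
    // holds "load A[i]" and the candidates at Lane are {%x, load A[i+1]} with
    // matching APOs, then in ReorderingMode::Load the look-ahead score of the
    // consecutive load beats that of %x, so getBestOperand() returns the index
    // of "load A[i+1]" and marks that operand as used.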
1260 
1261     /// Helper for reorderOperandVecs. \Returns the lane that we should start
1262     /// reordering from. This is the one which has the least number of operands
1263     /// that can freely move about.
1264     unsigned getBestLaneToStartReordering() const {
1265       unsigned BestLane = 0;
1266       unsigned Min = UINT_MAX;
1267       for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
1268            ++Lane) {
1269         unsigned NumFreeOps = getMaxNumOperandsThatCanBeReordered(Lane);
1270         if (NumFreeOps < Min) {
1271           Min = NumFreeOps;
1272           BestLane = Lane;
1273         }
1274       }
1275       return BestLane;
1276     }
1277 
1278     /// \Returns the maximum number of operands that are allowed to be reordered
1279     /// for \p Lane. This is used as a heuristic for selecting the first lane to
1280     /// start operand reordering.
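    /// For illustration: in a lane 'A = B + C' both operands have APO false,
    /// so CntTrue = 0, CntFalse = 2 and the result is 2; in a lane
    /// 'A = B - C' the RHS has APO true, giving max(1, 1) = 1.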
1281     unsigned getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
1282       unsigned CntTrue = 0;
1283       unsigned NumOperands = getNumOperands();
1284       // Operands with the same APO can be reordered. We therefore need to count
1285       // how many of them we have for each APO, like this: Cnt[APO] = x.
1286       // Since we only have two APOs, namely true and false, we can avoid using
1287       // a map. Instead we can simply count the number of operands that
1288       // correspond to one of them (in this case the 'true' APO), and calculate
1289       // the other by subtracting it from the total number of operands.
1290       for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx)
1291         if (getData(OpIdx, Lane).APO)
1292           ++CntTrue;
1293       unsigned CntFalse = NumOperands - CntTrue;
1294       return std::max(CntTrue, CntFalse);
1295     }
1296 
    /// Go through the instructions in \p VL and append their operands.
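    /// For illustration: given VL = {a + b, c - d}, the resulting layout is
    /// OpsVec[0] = {{a, APO:0}, {c, APO:0}} and
    /// OpsVec[1] = {{b, APO:0}, {d, APO:1}}, since only the RHS of the
    /// subtraction is attached to an inverse operation in the linearized
    /// form.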
1298     void appendOperandsOfVL(ArrayRef<Value *> VL) {
1299       assert(!VL.empty() && "Bad VL");
1300       assert((empty() || VL.size() == getNumLanes()) &&
1301              "Expected same number of lanes");
1302       assert(isa<Instruction>(VL[0]) && "Expected instruction");
1303       unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
1304       OpsVec.resize(NumOperands);
1305       unsigned NumLanes = VL.size();
1306       for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1307         OpsVec[OpIdx].resize(NumLanes);
1308         for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1309           assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
1310           // Our tree has just 3 nodes: the root and two operands.
1311           // It is therefore trivial to get the APO. We only need to check the
1312           // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
          // RHS operand. The LHS operand of both add and sub is never attached
          // to an inverse operation in the linearized form, therefore its APO
          // is false. The RHS is true only if VL[Lane] is an inverse operation.
1316 
1317           // Since operand reordering is performed on groups of commutative
1318           // operations or alternating sequences (e.g., +, -), we can safely
1319           // tell the inverse operations by checking commutativity.
1320           bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
1321           bool APO = (OpIdx == 0) ? false : IsInverseOperation;
1322           OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
1323                                  APO, false};
1324         }
1325       }
1326     }
1327 
1328     /// \returns the number of operands.
1329     unsigned getNumOperands() const { return OpsVec.size(); }
1330 
1331     /// \returns the number of lanes.
1332     unsigned getNumLanes() const { return OpsVec[0].size(); }
1333 
1334     /// \returns the operand value at \p OpIdx and \p Lane.
1335     Value *getValue(unsigned OpIdx, unsigned Lane) const {
1336       return getData(OpIdx, Lane).V;
1337     }
1338 
1339     /// \returns true if the data structure is empty.
1340     bool empty() const { return OpsVec.empty(); }
1341 
1342     /// Clears the data.
1343     void clear() { OpsVec.clear(); }
1344 
    /// \returns true if there are enough operands identical to \p Op to fill
    /// the whole vector.
    /// Note: This modifies the 'IsUsed' flag, so a clearUsed() must follow.
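    /// For illustration: for the lanes {x + a, x + b} a query for 'x'
    /// succeeds, since every other lane has an unused operand equal to 'x'
    /// with a matching APO; for the lanes {x + a, y + b} it fails.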
1348     bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
1349       bool OpAPO = getData(OpIdx, Lane).APO;
1350       for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
1351         if (Ln == Lane)
1352           continue;
1353         // This is set to true if we found a candidate for broadcast at Lane.
1354         bool FoundCandidate = false;
1355         for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
1356           OperandData &Data = getData(OpI, Ln);
1357           if (Data.APO != OpAPO || Data.IsUsed)
1358             continue;
1359           if (Data.V == Op) {
1360             FoundCandidate = true;
1361             Data.IsUsed = true;
1362             break;
1363           }
1364         }
1365         if (!FoundCandidate)
1366           return false;
1367       }
1368       return true;
1369     }
1370 
1371   public:
1372     /// Initialize with all the operands of the instruction vector \p RootVL.
1373     VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
1374                ScalarEvolution &SE, const BoUpSLP &R)
1375         : DL(DL), SE(SE), R(R) {
1376       // Append all the operands of RootVL.
1377       appendOperandsOfVL(RootVL);
1378     }
1379 
    /// \returns a value vector with the operands across all lanes for the
    /// operand at \p OpIdx.
1382     ValueList getVL(unsigned OpIdx) const {
1383       ValueList OpVL(OpsVec[OpIdx].size());
1384       assert(OpsVec[OpIdx].size() == getNumLanes() &&
1385              "Expected same num of lanes across all operands");
1386       for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
1387         OpVL[Lane] = OpsVec[OpIdx][Lane].V;
1388       return OpVL;
1389     }
1390 
    // Performs operand reordering for 2 or more operands.
    // The operands are kept in OpsVec[OpIdx][Lane] and are reordered in
    // place, lane by lane, so that matching operands end up in the same
    // operand index across all lanes.
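    // For illustration (assuming B[i] and C[i] are consecutive loads): given
    //   Lane 0: A[0] = B[0] + C[0]
    //   Lane 1: A[1] = C[1] + B[1]
    // reorder() swaps the operands of Lane 1 so that OpsVec[0] holds
    // {B[0], B[1]} and OpsVec[1] holds {C[0], C[1]}, allowing both operand
    // vectors to be built without extra shuffles.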
1394     void reorder() {
1395       unsigned NumOperands = getNumOperands();
1396       unsigned NumLanes = getNumLanes();
1397       // Each operand has its own mode. We are using this mode to help us select
1398       // the instructions for each lane, so that they match best with the ones
1399       // we have selected so far.
1400       SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);
1401 
1402       // This is a greedy single-pass algorithm. We are going over each lane
1403       // once and deciding on the best order right away with no back-tracking.
1404       // However, in order to increase its effectiveness, we start with the lane
1405       // that has operands that can move the least. For example, given the
1406       // following lanes:
1407       //  Lane 0 : A[0] = B[0] + C[0]   // Visited 3rd
1408       //  Lane 1 : A[1] = C[1] - B[1]   // Visited 1st
1409       //  Lane 2 : A[2] = B[2] + C[2]   // Visited 2nd
1410       //  Lane 3 : A[3] = C[3] - B[3]   // Visited 4th
1411       // we will start at Lane 1, since the operands of the subtraction cannot
1412       // be reordered. Then we will visit the rest of the lanes in a circular
      // fashion. That is, Lane 2, then Lane 0, and finally Lane 3.
1414 
1415       // Find the first lane that we will start our search from.
1416       unsigned FirstLane = getBestLaneToStartReordering();
1417 
1418       // Initialize the modes.
1419       for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1420         Value *OpLane0 = getValue(OpIdx, FirstLane);
        // Keep track of whether we have instructions with all the same opcode
        // on one side.
1423         if (isa<LoadInst>(OpLane0))
1424           ReorderingModes[OpIdx] = ReorderingMode::Load;
1425         else if (isa<Instruction>(OpLane0)) {
1426           // Check if OpLane0 should be broadcast.
1427           if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
1428             ReorderingModes[OpIdx] = ReorderingMode::Splat;
1429           else
1430             ReorderingModes[OpIdx] = ReorderingMode::Opcode;
1431         }
1432         else if (isa<Constant>(OpLane0))
1433           ReorderingModes[OpIdx] = ReorderingMode::Constant;
1434         else if (isa<Argument>(OpLane0))
1435           // Our best hope is a Splat. It may save some cost in some cases.
1436           ReorderingModes[OpIdx] = ReorderingMode::Splat;
1437         else
1438           // NOTE: This should be unreachable.
1439           ReorderingModes[OpIdx] = ReorderingMode::Failed;
1440       }
1441 
1442       // If the initial strategy fails for any of the operand indexes, then we
1443       // perform reordering again in a second pass. This helps avoid assigning
1444       // high priority to the failed strategy, and should improve reordering for
1445       // the non-failed operand indexes.
1446       for (int Pass = 0; Pass != 2; ++Pass) {
1447         // Skip the second pass if the first pass did not fail.
1448         bool StrategyFailed = false;
1449         // Mark all operand data as free to use.
1450         clearUsed();
1451         // We keep the original operand order for the FirstLane, so reorder the
1452         // rest of the lanes. We are visiting the nodes in a circular fashion,
1453         // using FirstLane as the center point and increasing the radius
1454         // distance.
1455         for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
1456           // Visit the lane on the right and then the lane on the left.
1457           for (int Direction : {+1, -1}) {
1458             int Lane = FirstLane + Direction * Distance;
1459             if (Lane < 0 || Lane >= (int)NumLanes)
1460               continue;
1461             int LastLane = Lane - Direction;
1462             assert(LastLane >= 0 && LastLane < (int)NumLanes &&
1463                    "Out of bounds");
1464             // Look for a good match for each operand.
1465             for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
              // Search for the operand that best matches the one we selected
              // for the previous lane (LastLane).
1467               Optional<unsigned> BestIdx =
1468                   getBestOperand(OpIdx, Lane, LastLane, ReorderingModes);
1469               // By not selecting a value, we allow the operands that follow to
1470               // select a better matching value. We will get a non-null value in
1471               // the next run of getBestOperand().
1472               if (BestIdx) {
1473                 // Swap the current operand with the one returned by
1474                 // getBestOperand().
1475                 swap(OpIdx, BestIdx.getValue(), Lane);
1476               } else {
1477                 // We failed to find a best operand, set mode to 'Failed'.
1478                 ReorderingModes[OpIdx] = ReorderingMode::Failed;
1479                 // Enable the second pass.
1480                 StrategyFailed = true;
1481               }
1482             }
1483           }
1484         }
1485         // Skip second pass if the strategy did not fail.
1486         if (!StrategyFailed)
1487           break;
1488       }
1489     }
1490 
1491 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1492     LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
1493       switch (RMode) {
1494       case ReorderingMode::Load:
1495         return "Load";
1496       case ReorderingMode::Opcode:
1497         return "Opcode";
1498       case ReorderingMode::Constant:
1499         return "Constant";
1500       case ReorderingMode::Splat:
1501         return "Splat";
1502       case ReorderingMode::Failed:
1503         return "Failed";
1504       }
1505       llvm_unreachable("Unimplemented Reordering Type");
1506     }
1507 
1508     LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
1509                                                    raw_ostream &OS) {
1510       return OS << getModeStr(RMode);
1511     }
1512 
1513     /// Debug print.
1514     LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
1515       printMode(RMode, dbgs());
1516     }
1517 
1518     friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
1519       return printMode(RMode, OS);
1520     }
1521 
1522     LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
1523       const unsigned Indent = 2;
1524       unsigned Cnt = 0;
1525       for (const OperandDataVec &OpDataVec : OpsVec) {
1526         OS << "Operand " << Cnt++ << "\n";
1527         for (const OperandData &OpData : OpDataVec) {
1528           OS.indent(Indent) << "{";
1529           if (Value *V = OpData.V)
1530             OS << *V;
1531           else
1532             OS << "null";
1533           OS << ", APO:" << OpData.APO << "}\n";
1534         }
1535         OS << "\n";
1536       }
1537       return OS;
1538     }
1539 
1540     /// Debug print.
1541     LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
1542 #endif
1543   };
1544 
1545   /// Checks if the instruction is marked for deletion.
1546   bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); }
1547 
  /// Marks the values in \p AV for later deletion; their remaining uses are
  /// replaced with Undefs when the instructions are actually erased.
1549   void eraseInstructions(ArrayRef<Value *> AV);
1550 
1551   ~BoUpSLP();
1552 
1553 private:
  /// Checks if all users of \p I are part of the vectorization tree.
1555   bool areAllUsersVectorized(Instruction *I,
1556                              ArrayRef<Value *> VectorizedVals) const;
1557 
1558   /// \returns the cost of the vectorizable entry.
1559   InstructionCost getEntryCost(const TreeEntry *E,
1560                                ArrayRef<Value *> VectorizedVals);
1561 
1562   /// This is the recursive part of buildTree.
1563   void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
1564                      const EdgeInfo &EI);
1565 
1566   /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
1567   /// be vectorized to use the original vector (or aggregate "bitcast" to a
1568   /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
1569   /// returns false, setting \p CurrentOrder to either an empty vector or a
  /// non-identity permutation that allows the extract instructions to be
  /// reused.
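  /// For illustration: if \p VL extracts elements 0 and 1 of the same source
  /// vector, \p CurrentOrder is the identity and the source vector can be
  /// reused directly; extracting elements 1 and 0 instead yields the
  /// non-identity permutation {1, 0}.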
1571   bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
1572                        SmallVectorImpl<unsigned> &CurrentOrder) const;
1573 
1574   /// Vectorize a single entry in the tree.
1575   Value *vectorizeTree(TreeEntry *E);
1576 
1577   /// Vectorize a single entry in the tree, starting in \p VL.
1578   Value *vectorizeTree(ArrayRef<Value *> VL);
1579 
1580   /// \returns the scalarization cost for this type. Scalarization in this
1581   /// context means the creation of vectors from a group of scalars.
1582   InstructionCost
1583   getGatherCost(FixedVectorType *Ty,
1584                 const DenseSet<unsigned> &ShuffledIndices) const;
1585 
1586   /// Checks if the gathered \p VL can be represented as shuffle(s) of previous
1587   /// tree entries.
1588   /// \returns ShuffleKind, if gathered values can be represented as shuffles of
1589   /// previous tree entries. \p Mask is filled with the shuffle mask.
1590   Optional<TargetTransformInfo::ShuffleKind>
1591   isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
1592                         SmallVectorImpl<const TreeEntry *> &Entries);
1593 
1594   /// \returns the scalarization cost for this list of values. Assuming that
1595   /// this subtree gets vectorized, we may need to extract the values from the
1596   /// roots. This method calculates the cost of extracting the values.
1597   InstructionCost getGatherCost(ArrayRef<Value *> VL) const;
1598 
1599   /// Set the Builder insert point to one after the last instruction in
  /// the bundle.
1601   void setInsertPointAfterBundle(const TreeEntry *E);
1602 
1603   /// \returns a vector from a collection of scalars in \p VL.
1604   Value *gather(ArrayRef<Value *> VL);
1605 
1606   /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even if the tree height is tiny.
1608   bool isFullyVectorizableTinyTree() const;
1609 
1610   /// Reorder commutative or alt operands to get better probability of
1611   /// generating vectorized code.
1612   static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
1613                                              SmallVectorImpl<Value *> &Left,
1614                                              SmallVectorImpl<Value *> &Right,
1615                                              const DataLayout &DL,
1616                                              ScalarEvolution &SE,
1617                                              const BoUpSLP &R);
1618   struct TreeEntry {
1619     using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
1620     TreeEntry(VecTreeTy &Container) : Container(Container) {}
1621 
1622     /// \returns true if the scalars in VL are equal to this entry.
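    /// For illustration: with Scalars = {a, b} and
    /// ReuseShuffleIndices = {0, 0, 1, 1}, the list VL = {a, a, b, b} is
    /// considered the same as this entry.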
1623     bool isSame(ArrayRef<Value *> VL) const {
1624       if (VL.size() == Scalars.size())
1625         return std::equal(VL.begin(), VL.end(), Scalars.begin());
1626       return VL.size() == ReuseShuffleIndices.size() &&
1627              std::equal(
1628                  VL.begin(), VL.end(), ReuseShuffleIndices.begin(),
1629                  [this](Value *V, int Idx) { return V == Scalars[Idx]; });
1630     }
1631 
1632     /// A vector of scalars.
1633     ValueList Scalars;
1634 
    /// The Scalars are vectorized into this value. It is initialized to
    /// nullptr.
1636     Value *VectorizedValue = nullptr;
1637 
1638     /// Do we need to gather this sequence or vectorize it
1639     /// (either with vector instruction or with scatter/gather
1640     /// intrinsics for store/load)?
1641     enum EntryState { Vectorize, ScatterVectorize, NeedToGather };
1642     EntryState State;
1643 
1644     /// Does this sequence require some shuffling?
1645     SmallVector<int, 4> ReuseShuffleIndices;
1646 
1647     /// Does this entry require reordering?
1648     SmallVector<unsigned, 4> ReorderIndices;
1649 
1650     /// Points back to the VectorizableTree.
1651     ///
1652     /// Only used for Graphviz right now.  Unfortunately GraphTrait::NodeRef has
1653     /// to be a pointer and needs to be able to initialize the child iterator.
1654     /// Thus we need a reference back to the container to translate the indices
1655     /// to entries.
1656     VecTreeTy &Container;
1657 
1658     /// The TreeEntry index containing the user of this entry.  We can actually
1659     /// have multiple users so the data structure is not truly a tree.
1660     SmallVector<EdgeInfo, 1> UserTreeIndices;
1661 
    /// The index of this TreeEntry in VectorizableTree.
1663     int Idx = -1;
1664 
1665   private:
1666     /// The operands of each instruction in each lane Operands[op_index][lane].
1667     /// Note: This helps avoid the replication of the code that performs the
1668     /// reordering of operands during buildTree_rec() and vectorizeTree().
1669     SmallVector<ValueList, 2> Operands;
1670 
1671     /// The main/alternate instruction.
1672     Instruction *MainOp = nullptr;
1673     Instruction *AltOp = nullptr;
1674 
1675   public:
1676     /// Set this bundle's \p OpIdx'th operand to \p OpVL.
1677     void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) {
1678       if (Operands.size() < OpIdx + 1)
1679         Operands.resize(OpIdx + 1);
1680       assert(Operands[OpIdx].empty() && "Already resized?");
1681       Operands[OpIdx].resize(Scalars.size());
1682       for (unsigned Lane = 0, E = Scalars.size(); Lane != E; ++Lane)
1683         Operands[OpIdx][Lane] = OpVL[Lane];
1684     }
1685 
1686     /// Set the operands of this bundle in their original order.
1687     void setOperandsInOrder() {
1688       assert(Operands.empty() && "Already initialized?");
1689       auto *I0 = cast<Instruction>(Scalars[0]);
1690       Operands.resize(I0->getNumOperands());
1691       unsigned NumLanes = Scalars.size();
1692       for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands();
1693            OpIdx != NumOperands; ++OpIdx) {
1694         Operands[OpIdx].resize(NumLanes);
1695         for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1696           auto *I = cast<Instruction>(Scalars[Lane]);
1697           assert(I->getNumOperands() == NumOperands &&
1698                  "Expected same number of operands");
1699           Operands[OpIdx][Lane] = I->getOperand(OpIdx);
1700         }
1701       }
1702     }
1703 
1704     /// \returns the \p OpIdx operand of this TreeEntry.
1705     ValueList &getOperand(unsigned OpIdx) {
      assert(OpIdx < Operands.size() && "Out of bounds");
1707       return Operands[OpIdx];
1708     }
1709 
1710     /// \returns the number of operands.
1711     unsigned getNumOperands() const { return Operands.size(); }
1712 
    /// \returns the single \p OpIdx operand.
1714     Value *getSingleOperand(unsigned OpIdx) const {
      assert(OpIdx < Operands.size() && "Out of bounds");
1716       assert(!Operands[OpIdx].empty() && "No operand available");
1717       return Operands[OpIdx][0];
1718     }
1719 
    /// \returns true if some of the instructions in the list have alternate
    /// opcodes.
1721     bool isAltShuffle() const {
1722       return getOpcode() != getAltOpcode();
1723     }
1724 
1725     bool isOpcodeOrAlt(Instruction *I) const {
1726       unsigned CheckedOpcode = I->getOpcode();
1727       return (getOpcode() == CheckedOpcode ||
1728               getAltOpcode() == CheckedOpcode);
1729     }
1730 
1731     /// Chooses the correct key for scheduling data. If \p Op has the same (or
1732     /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is
1733     /// \p OpValue.
1734     Value *isOneOf(Value *Op) const {
1735       auto *I = dyn_cast<Instruction>(Op);
1736       if (I && isOpcodeOrAlt(I))
1737         return Op;
1738       return MainOp;
1739     }
1740 
1741     void setOperations(const InstructionsState &S) {
1742       MainOp = S.MainOp;
1743       AltOp = S.AltOp;
1744     }
1745 
1746     Instruction *getMainOp() const {
1747       return MainOp;
1748     }
1749 
1750     Instruction *getAltOp() const {
1751       return AltOp;
1752     }
1753 
1754     /// The main/alternate opcodes for the list of instructions.
1755     unsigned getOpcode() const {
1756       return MainOp ? MainOp->getOpcode() : 0;
1757     }
1758 
1759     unsigned getAltOpcode() const {
1760       return AltOp ? AltOp->getOpcode() : 0;
1761     }
1762 
    /// Updates the operations state of this entry if a reorder occurred.
1764     bool updateStateIfReorder() {
1765       if (ReorderIndices.empty())
1766         return false;
1767       InstructionsState S = getSameOpcode(Scalars, ReorderIndices.front());
1768       setOperations(S);
1769       return true;
1770     }
1771     /// When ReuseShuffleIndices is empty it just returns position of \p V
1772     /// within vector of Scalars. Otherwise, try to remap on its reuse index.
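    /// For illustration: with Scalars = {a, b} and
    /// ReuseShuffleIndices = {1, 0, 1, 0}, V = b is found at position 1 in
    /// Scalars and remapped to lane 0, the first reuse of index 1.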
1773     int findLaneForValue(Value *V) const {
1774       unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V));
1775       assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
1776       if (!ReuseShuffleIndices.empty()) {
1777         FoundLane = std::distance(ReuseShuffleIndices.begin(),
1778                                   find(ReuseShuffleIndices, FoundLane));
1779       }
1780       return FoundLane;
1781     }
1782 
1783 #ifndef NDEBUG
1784     /// Debug printer.
1785     LLVM_DUMP_METHOD void dump() const {
1786       dbgs() << Idx << ".\n";
1787       for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) {
1788         dbgs() << "Operand " << OpI << ":\n";
1789         for (const Value *V : Operands[OpI])
1790           dbgs().indent(2) << *V << "\n";
1791       }
1792       dbgs() << "Scalars: \n";
1793       for (Value *V : Scalars)
1794         dbgs().indent(2) << *V << "\n";
1795       dbgs() << "State: ";
1796       switch (State) {
1797       case Vectorize:
1798         dbgs() << "Vectorize\n";
1799         break;
1800       case ScatterVectorize:
1801         dbgs() << "ScatterVectorize\n";
1802         break;
1803       case NeedToGather:
1804         dbgs() << "NeedToGather\n";
1805         break;
1806       }
1807       dbgs() << "MainOp: ";
1808       if (MainOp)
1809         dbgs() << *MainOp << "\n";
1810       else
1811         dbgs() << "NULL\n";
1812       dbgs() << "AltOp: ";
1813       if (AltOp)
1814         dbgs() << *AltOp << "\n";
1815       else
1816         dbgs() << "NULL\n";
1817       dbgs() << "VectorizedValue: ";
1818       if (VectorizedValue)
1819         dbgs() << *VectorizedValue << "\n";
1820       else
1821         dbgs() << "NULL\n";
1822       dbgs() << "ReuseShuffleIndices: ";
1823       if (ReuseShuffleIndices.empty())
1824         dbgs() << "Empty";
1825       else
1826         for (unsigned ReuseIdx : ReuseShuffleIndices)
1827           dbgs() << ReuseIdx << ", ";
1828       dbgs() << "\n";
1829       dbgs() << "ReorderIndices: ";
1830       for (unsigned ReorderIdx : ReorderIndices)
1831         dbgs() << ReorderIdx << ", ";
1832       dbgs() << "\n";
1833       dbgs() << "UserTreeIndices: ";
1834       for (const auto &EInfo : UserTreeIndices)
1835         dbgs() << EInfo << ", ";
1836       dbgs() << "\n";
1837     }
1838 #endif
1839   };
1840 
1841 #ifndef NDEBUG
1842   void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost,
1843                      InstructionCost VecCost,
1844                      InstructionCost ScalarCost) const {
1845     dbgs() << "SLP: Calculated costs for Tree:\n"; E->dump();
1846     dbgs() << "SLP: Costs:\n";
1847     dbgs() << "SLP:     ReuseShuffleCost = " << ReuseShuffleCost << "\n";
1848     dbgs() << "SLP:     VectorCost = " << VecCost << "\n";
1849     dbgs() << "SLP:     ScalarCost = " << ScalarCost << "\n";
1850     dbgs() << "SLP:     ReuseShuffleCost + VecCost - ScalarCost = " <<
1851                ReuseShuffleCost + VecCost - ScalarCost << "\n";
1852   }
1853 #endif
1854 
1855   /// Create a new VectorizableTree entry.
1856   TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle,
1857                           const InstructionsState &S,
1858                           const EdgeInfo &UserTreeIdx,
1859                           ArrayRef<unsigned> ReuseShuffleIndices = None,
1860                           ArrayRef<unsigned> ReorderIndices = None) {
1861     TreeEntry::EntryState EntryState =
1862         Bundle ? TreeEntry::Vectorize : TreeEntry::NeedToGather;
1863     return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx,
1864                         ReuseShuffleIndices, ReorderIndices);
1865   }
1866 
1867   TreeEntry *newTreeEntry(ArrayRef<Value *> VL,
1868                           TreeEntry::EntryState EntryState,
1869                           Optional<ScheduleData *> Bundle,
1870                           const InstructionsState &S,
1871                           const EdgeInfo &UserTreeIdx,
1872                           ArrayRef<unsigned> ReuseShuffleIndices = None,
1873                           ArrayRef<unsigned> ReorderIndices = None) {
1874     assert(((!Bundle && EntryState == TreeEntry::NeedToGather) ||
1875             (Bundle && EntryState != TreeEntry::NeedToGather)) &&
1876            "Need to vectorize gather entry?");
1877     VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree));
1878     TreeEntry *Last = VectorizableTree.back().get();
1879     Last->Idx = VectorizableTree.size() - 1;
1880     Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
1881     Last->State = EntryState;
1882     Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(),
1883                                      ReuseShuffleIndices.end());
1884     Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end());
1885     Last->setOperations(S);
1886     if (Last->State != TreeEntry::NeedToGather) {
1887       for (Value *V : VL) {
1888         assert(!getTreeEntry(V) && "Scalar already in tree!");
1889         ScalarToTreeEntry[V] = Last;
1890       }
1891       // Update the scheduler bundle to point to this TreeEntry.
1892       unsigned Lane = 0;
1893       for (ScheduleData *BundleMember = Bundle.getValue(); BundleMember;
1894            BundleMember = BundleMember->NextInBundle) {
1895         BundleMember->TE = Last;
1896         BundleMember->Lane = Lane;
1897         ++Lane;
1898       }
1899       assert((!Bundle.getValue() || Lane == VL.size()) &&
1900              "Bundle and VL out of sync");
1901     } else {
1902       MustGather.insert(VL.begin(), VL.end());
1903     }
1904 
1905     if (UserTreeIdx.UserTE)
1906       Last->UserTreeIndices.push_back(UserTreeIdx);
1907 
1908     return Last;
1909   }
1910 
1911   /// -- Vectorization State --
1912   /// Holds all of the tree entries.
1913   TreeEntry::VecTreeTy VectorizableTree;
1914 
1915 #ifndef NDEBUG
1916   /// Debug printer.
1917   LLVM_DUMP_METHOD void dumpVectorizableTree() const {
1918     for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) {
1919       VectorizableTree[Id]->dump();
1920       dbgs() << "\n";
1921     }
1922   }
1923 #endif
1924 
1925   TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); }
1926 
1927   const TreeEntry *getTreeEntry(Value *V) const {
1928     return ScalarToTreeEntry.lookup(V);
1929   }
1930 
1931   /// Maps a specific scalar to its tree entry.
1932   SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry;
1933 
1934   /// Maps a value to the proposed vectorizable size.
1935   SmallDenseMap<Value *, unsigned> InstrElementSize;
1936 
1937   /// A list of scalars that we found that we need to keep as scalars.
1938   ValueSet MustGather;
1939 
1940   /// This POD struct describes one external user in the vectorized tree.
1941   struct ExternalUser {
1942     ExternalUser(Value *S, llvm::User *U, int L)
1943         : Scalar(S), User(U), Lane(L) {}
1944 
    // The scalar in our function.
1946     Value *Scalar;
1947 
    // The user that uses the scalar.
1949     llvm::User *User;
1950 
    // The lane that the scalar belongs to.
1952     int Lane;
1953   };
1954   using UserList = SmallVector<ExternalUser, 16>;
1955 
1956   /// Checks if two instructions may access the same memory.
1957   ///
1958   /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
1959   /// is invariant in the calling loop.
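  /// For illustration: the first query for a given (store, load) pair runs a
  /// real AA check and caches the verdict; repeated queries for the same
  /// pair, which are common when many candidate bundles touch the same
  /// memory region, are answered from AliasCache without consulting AA again.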
1960   bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
1961                  Instruction *Inst2) {
1962     // First check if the result is already in the cache.
    AliasCacheKey Key = std::make_pair(Inst1, Inst2);
    Optional<bool> &Result = AliasCache[Key];
    if (Result.hasValue())
      return Result.getValue();
    MemoryLocation Loc2 = getLocation(Inst2, AA);
    bool Aliased = true;
    if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
      // Do the alias check.
      Aliased = !AA->isNoAlias(Loc1, Loc2);
    }
    // Store the result in the cache.
    Result = Aliased;
    return Aliased;
1977   }
1978 
1979   using AliasCacheKey = std::pair<Instruction *, Instruction *>;
1980 
1981   /// Cache for alias results.
1982   /// TODO: consider moving this to the AliasAnalysis itself.
1983   DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
1984 
1985   /// Removes an instruction from its block and eventually deletes it.
1986   /// It's like Instruction::eraseFromParent() except that the actual deletion
1987   /// is delayed until BoUpSLP is destructed.
1988   /// This is required to ensure that there are no incorrect collisions in the
1989   /// AliasCache, which can happen if a new instruction is allocated at the
1990   /// same address as a previously deleted instruction.
1991   void eraseInstruction(Instruction *I, bool ReplaceOpsWithUndef = false) {
1992     auto It = DeletedInstructions.try_emplace(I, ReplaceOpsWithUndef).first;
1993     It->getSecond() = It->getSecond() && ReplaceOpsWithUndef;
1994   }
1995 
1996   /// Temporary store for deleted instructions. Instructions will be deleted
1997   /// eventually when the BoUpSLP is destructed.
1998   DenseMap<Instruction *, bool> DeletedInstructions;
1999 
  /// A list of values that need to be extracted out of the tree.
2001   /// This list holds pairs of (Internal Scalar : External User). External User
2002   /// can be nullptr, it means that this Internal Scalar will be used later,
2003   /// after vectorization.
2004   UserList ExternalUses;
2005 
2006   /// Values used only by @llvm.assume calls.
2007   SmallPtrSet<const Value *, 32> EphValues;
2008 
2009   /// Holds all of the instructions that we gathered.
2010   SetVector<Instruction *> GatherSeq;
2011 
2012   /// A list of blocks that we are going to CSE.
2013   SetVector<BasicBlock *> CSEBlocks;
2014 
2015   /// Contains all scheduling relevant data for an instruction.
2016   /// A ScheduleData either represents a single instruction or a member of an
2017   /// instruction bundle (= a group of instructions which is combined into a
2018   /// vector instruction).
2019   struct ScheduleData {
2020     // The initial value for the dependency counters. It means that the
2021     // dependencies are not calculated yet.
2022     enum { InvalidDeps = -1 };
2023 
2024     ScheduleData() = default;
2025 
2026     void init(int BlockSchedulingRegionID, Value *OpVal) {
2027       FirstInBundle = this;
2028       NextInBundle = nullptr;
2029       NextLoadStore = nullptr;
2030       IsScheduled = false;
2031       SchedulingRegionID = BlockSchedulingRegionID;
2032       UnscheduledDepsInBundle = UnscheduledDeps;
2033       clearDependencies();
2034       OpValue = OpVal;
2035       TE = nullptr;
2036       Lane = -1;
2037     }
2038 
2039     /// Returns true if the dependency information has been calculated.
2040     bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
2041 
2042     /// Returns true for single instructions and for bundle representatives
2043     /// (= the head of a bundle).
2044     bool isSchedulingEntity() const { return FirstInBundle == this; }
2045 
2046     /// Returns true if it represents an instruction bundle and not only a
2047     /// single instruction.
2048     bool isPartOfBundle() const {
2049       return NextInBundle != nullptr || FirstInBundle != this;
2050     }
2051 
2052     /// Returns true if it is ready for scheduling, i.e. it has no more
    /// unscheduled dependent instructions/bundles.
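    /// For illustration: a two-instruction bundle whose members each have one
    /// unscheduled user has UnscheduledDepsInBundle == 2; it becomes ready
    /// only after both of those users have been scheduled.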
2054     bool isReady() const {
2055       assert(isSchedulingEntity() &&
2056              "can't consider non-scheduling entity for ready list");
2057       return UnscheduledDepsInBundle == 0 && !IsScheduled;
2058     }
2059 
2060     /// Modifies the number of unscheduled dependencies, also updating it for
2061     /// the whole bundle.
2062     int incrementUnscheduledDeps(int Incr) {
2063       UnscheduledDeps += Incr;
2064       return FirstInBundle->UnscheduledDepsInBundle += Incr;
2065     }
2066 
2067     /// Sets the number of unscheduled dependencies to the number of
2068     /// dependencies.
2069     void resetUnscheduledDeps() {
2070       incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
2071     }
2072 
2073     /// Clears all dependency information.
2074     void clearDependencies() {
2075       Dependencies = InvalidDeps;
2076       resetUnscheduledDeps();
2077       MemoryDependencies.clear();
2078     }
2079 
2080     void dump(raw_ostream &os) const {
2081       if (!isSchedulingEntity()) {
2082         os << "/ " << *Inst;
2083       } else if (NextInBundle) {
2084         os << '[' << *Inst;
2085         ScheduleData *SD = NextInBundle;
2086         while (SD) {
2087           os << ';' << *SD->Inst;
2088           SD = SD->NextInBundle;
2089         }
2090         os << ']';
2091       } else {
2092         os << *Inst;
2093       }
2094     }
2095 
2096     Instruction *Inst = nullptr;
2097 
2098     /// Points to the head in an instruction bundle (and always to this for
2099     /// single instructions).
2100     ScheduleData *FirstInBundle = nullptr;
2101 
    /// Singly linked list of all instructions in a bundle. Null if it is a
2103     /// single instruction.
2104     ScheduleData *NextInBundle = nullptr;
2105 
    /// Singly linked list of all memory instructions (e.g. load, store, call)
2107     /// in the block - until the end of the scheduling region.
2108     ScheduleData *NextLoadStore = nullptr;
2109 
2110     /// The dependent memory instructions.
2111     /// This list is derived on demand in calculateDependencies().
2112     SmallVector<ScheduleData *, 4> MemoryDependencies;
2113 
2114     /// This ScheduleData is in the current scheduling region if this matches
2115     /// the current SchedulingRegionID of BlockScheduling.
2116     int SchedulingRegionID = 0;
2117 
2118     /// Used for getting a "good" final ordering of instructions.
2119     int SchedulingPriority = 0;
2120 
    /// The number of dependencies. Consists of the number of users of the
    /// instruction plus the number of dependent memory instructions (if any).
2123     /// This value is calculated on demand.
2124     /// If InvalidDeps, the number of dependencies is not calculated yet.
2125     int Dependencies = InvalidDeps;
2126 
2127     /// The number of dependencies minus the number of dependencies of scheduled
2128     /// instructions. As soon as this is zero, the instruction/bundle gets ready
2129     /// for scheduling.
2130     /// Note that this is negative as long as Dependencies is not calculated.
2131     int UnscheduledDeps = InvalidDeps;
2132 
2133     /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for
2134     /// single instructions.
2135     int UnscheduledDepsInBundle = InvalidDeps;
2136 
2137     /// True if this instruction is scheduled (or considered as scheduled in the
2138     /// dry-run).
2139     bool IsScheduled = false;
2140 
    /// The value that defines the opcode of the current instruction in the
    /// schedule data.
2142     Value *OpValue = nullptr;
2143 
2144     /// The TreeEntry that this instruction corresponds to.
2145     TreeEntry *TE = nullptr;
2146 
2147     /// The lane of this node in the TreeEntry.
2148     int Lane = -1;
2149   };
2150 
2151 #ifndef NDEBUG
2152   friend inline raw_ostream &operator<<(raw_ostream &os,
2153                                         const BoUpSLP::ScheduleData &SD) {
2154     SD.dump(os);
2155     return os;
2156   }
2157 #endif
2158 
2159   friend struct GraphTraits<BoUpSLP *>;
2160   friend struct DOTGraphTraits<BoUpSLP *>;
2161 
2162   /// Contains all scheduling data for a basic block.
2163   struct BlockScheduling {
2164     BlockScheduling(BasicBlock *BB)
2165         : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}
2166 
2167     void clear() {
2168       ReadyInsts.clear();
2169       ScheduleStart = nullptr;
2170       ScheduleEnd = nullptr;
2171       FirstLoadStoreInRegion = nullptr;
2172       LastLoadStoreInRegion = nullptr;
2173 
2174       // Reduce the maximum schedule region size by the size of the
2175       // previous scheduling run.
2176       ScheduleRegionSizeLimit -= ScheduleRegionSize;
2177       if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
2178         ScheduleRegionSizeLimit = MinScheduleRegionSize;
2179       ScheduleRegionSize = 0;
2180 
2181       // Make a new scheduling region, i.e. all existing ScheduleData is not
2182       // in the new region yet.
2183       ++SchedulingRegionID;
2184     }
2185 
    ScheduleData *getScheduleData(Value *V) {
      ScheduleData *SD = ScheduleDataMap.lookup(V);
      if (SD && SD->SchedulingRegionID == SchedulingRegionID)
        return SD;
      return nullptr;
    }
2192 
2193     ScheduleData *getScheduleData(Value *V, Value *Key) {
2194       if (V == Key)
2195         return getScheduleData(V);
2196       auto I = ExtraScheduleDataMap.find(V);
2197       if (I != ExtraScheduleDataMap.end()) {
2198         ScheduleData *SD = I->second[Key];
2199         if (SD && SD->SchedulingRegionID == SchedulingRegionID)
2200           return SD;
2201       }
2202       return nullptr;
2203     }
2204 
2205     bool isInSchedulingRegion(ScheduleData *SD) const {
2206       return SD->SchedulingRegionID == SchedulingRegionID;
2207     }
2208 
2209     /// Marks an instruction as scheduled and puts all dependent ready
2210     /// instructions into the ready-list.
2211     template <typename ReadyListType>
2212     void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
2213       SD->IsScheduled = true;
2214       LLVM_DEBUG(dbgs() << "SLP:   schedule " << *SD << "\n");
2215 
2216       ScheduleData *BundleMember = SD;
2217       while (BundleMember) {
2218         if (BundleMember->Inst != BundleMember->OpValue) {
2219           BundleMember = BundleMember->NextInBundle;
2220           continue;
2221         }
2222         // Handle the def-use chain dependencies.
2223 
2224         // Decrement the unscheduled counter and insert to ready list if ready.
2225         auto &&DecrUnsched = [this, &ReadyList](Instruction *I) {
2226           doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
2227             if (OpDef && OpDef->hasValidDependencies() &&
2228                 OpDef->incrementUnscheduledDeps(-1) == 0) {
2229               // There are no more unscheduled dependencies after
2230               // decrementing, so we can put the dependent instruction
2231               // into the ready list.
2232               ScheduleData *DepBundle = OpDef->FirstInBundle;
2233               assert(!DepBundle->IsScheduled &&
2234                      "already scheduled bundle gets ready");
2235               ReadyList.insert(DepBundle);
2236               LLVM_DEBUG(dbgs()
2237                          << "SLP:    gets ready (def): " << *DepBundle << "\n");
2238             }
2239           });
2240         };
2241 
        // If BundleMember is a vector bundle, its operands may have been
        // reordered during buildTree(). We therefore need to get its operands
        // through the TreeEntry.
2245         if (TreeEntry *TE = BundleMember->TE) {
2246           int Lane = BundleMember->Lane;
2247           assert(Lane >= 0 && "Lane not set");
2248 
          // Since the vectorization tree is being built recursively, this
          // assertion ensures that the tree entry has all operands set before
          // reaching this code. A couple of exceptions known at the moment
          // are extracts, where the second (immediate) operand is not added.
          // Since immediates do not affect scheduler behavior this is
          // considered okay.
2255           auto *In = TE->getMainOp();
2256           assert(In &&
2257                  (isa<ExtractValueInst>(In) || isa<ExtractElementInst>(In) ||
2258                   In->getNumOperands() == TE->getNumOperands()) &&
2259                  "Missed TreeEntry operands?");
          (void)In; // Fake use to avoid build failure when asserts disabled.
2261 
2262           for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands();
2263                OpIdx != NumOperands; ++OpIdx)
2264             if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane]))
2265               DecrUnsched(I);
2266         } else {
2267           // If BundleMember is a stand-alone instruction, no operand reordering
2268           // has taken place, so we directly access its operands.
2269           for (Use &U : BundleMember->Inst->operands())
2270             if (auto *I = dyn_cast<Instruction>(U.get()))
2271               DecrUnsched(I);
2272         }
2273         // Handle the memory dependencies.
2274         for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
2275           if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
2276             // There are no more unscheduled dependencies after decrementing,
2277             // so we can put the dependent instruction into the ready list.
2278             ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
2279             assert(!DepBundle->IsScheduled &&
2280                    "already scheduled bundle gets ready");
2281             ReadyList.insert(DepBundle);
2282             LLVM_DEBUG(dbgs()
2283                        << "SLP:    gets ready (mem): " << *DepBundle << "\n");
2284           }
2285         }
2286         BundleMember = BundleMember->NextInBundle;
2287       }
2288     }
2289 
2290     void doForAllOpcodes(Value *V,
2291                          function_ref<void(ScheduleData *SD)> Action) {
2292       if (ScheduleData *SD = getScheduleData(V))
2293         Action(SD);
2294       auto I = ExtraScheduleDataMap.find(V);
2295       if (I != ExtraScheduleDataMap.end())
2296         for (auto &P : I->second)
2297           if (P.second->SchedulingRegionID == SchedulingRegionID)
2298             Action(P.second);
2299     }
2300 
2301     /// Put all instructions into the ReadyList which are ready for scheduling.
2302     template <typename ReadyListType>
2303     void initialFillReadyList(ReadyListType &ReadyList) {
2304       for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
2305         doForAllOpcodes(I, [&](ScheduleData *SD) {
2306           if (SD->isSchedulingEntity() && SD->isReady()) {
2307             ReadyList.insert(SD);
2308             LLVM_DEBUG(dbgs()
2309                        << "SLP:    initially in ready list: " << *I << "\n");
2310           }
2311         });
2312       }
2313     }
2314 
2315     /// Checks if a bundle of instructions can be scheduled, i.e. has no
2316     /// cyclic dependencies. This is only a dry-run, no instructions are
2317     /// actually moved at this stage.
2318     /// \returns the scheduling bundle. The returned Optional value is non-None
2319     /// if \p VL is allowed to be scheduled.
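    /// For illustration: a bundle {%a, %b} where %b uses %a cannot be
    /// scheduled, since the bundle would depend on itself.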
2320     Optional<ScheduleData *>
2321     tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
2322                       const InstructionsState &S);
2323 
2324     /// Un-bundles a group of instructions.
2325     void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);
2326 
2327     /// Allocates schedule data chunk.
2328     ScheduleData *allocateScheduleDataChunks();
2329 
2330     /// Extends the scheduling region so that V is inside the region.
2331     /// \returns true if the region size is within the limit.
2332     bool extendSchedulingRegion(Value *V, const InstructionsState &S);
2333 
2334     /// Initialize the ScheduleData structures for new instructions in the
2335     /// scheduling region.
2336     void initScheduleData(Instruction *FromI, Instruction *ToI,
2337                           ScheduleData *PrevLoadStore,
2338                           ScheduleData *NextLoadStore);
2339 
2340     /// Updates the dependency information of a bundle and of all instructions/
2341     /// bundles which depend on the original bundle.
2342     void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
2343                                BoUpSLP *SLP);
2344 
    /// Sets all instructions in the scheduling region to un-scheduled.
2346     void resetSchedule();
2347 
2348     BasicBlock *BB;
2349 
2350     /// Simple memory allocation for ScheduleData.
2351     std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
2352 
2353     /// The size of a ScheduleData array in ScheduleDataChunks.
2354     int ChunkSize;
2355 
2356     /// The allocator position in the current chunk, which is the last entry
2357     /// of ScheduleDataChunks.
2358     int ChunkPos;
2359 
2360     /// Attaches ScheduleData to Instruction.
    /// Note that the mapping survives across all vectorization iterations, i.e.
2362     /// ScheduleData structures are recycled.
2363     DenseMap<Value *, ScheduleData *> ScheduleDataMap;
2364 
2365     /// Attaches ScheduleData to Instruction with the leading key.
2366     DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>>
2367         ExtraScheduleDataMap;
2368 
2369     struct ReadyList : SmallVector<ScheduleData *, 8> {
2370       void insert(ScheduleData *SD) { push_back(SD); }
2371     };
2372 
2373     /// The ready-list for scheduling (only used for the dry-run).
2374     ReadyList ReadyInsts;
2375 
2376     /// The first instruction of the scheduling region.
2377     Instruction *ScheduleStart = nullptr;
2378 
2379     /// The first instruction _after_ the scheduling region.
2380     Instruction *ScheduleEnd = nullptr;
2381 
2382     /// The first memory accessing instruction in the scheduling region
2383     /// (can be null).
2384     ScheduleData *FirstLoadStoreInRegion = nullptr;
2385 
2386     /// The last memory accessing instruction in the scheduling region
2387     /// (can be null).
2388     ScheduleData *LastLoadStoreInRegion = nullptr;
2389 
2390     /// The current size of the scheduling region.
2391     int ScheduleRegionSize = 0;
2392 
2393     /// The maximum size allowed for the scheduling region.
2394     int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget;
2395 
2396     /// The ID of the scheduling region. For a new vectorization iteration this
    /// is incremented, which "removes" all ScheduleData from the region.
2398     // Make sure that the initial SchedulingRegionID is greater than the
2399     // initial SchedulingRegionID in ScheduleData (which is 0).
2400     int SchedulingRegionID = 1;
2401   };
2402 
2403   /// Attaches the BlockScheduling structures to basic blocks.
2404   MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
2405 
2406   /// Performs the "real" scheduling. Done before vectorization is actually
2407   /// performed in a basic block.
2408   void scheduleBlock(BlockScheduling *BS);
2409 
2410   /// List of users to ignore during scheduling and that don't need extracting.
2411   ArrayRef<Value *> UserIgnoreList;
2412 
2413   /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of
2414   /// sorted SmallVectors of unsigned.
2415   struct OrdersTypeDenseMapInfo {
2416     static OrdersType getEmptyKey() {
2417       OrdersType V;
2418       V.push_back(~1U);
2419       return V;
2420     }
2421 
2422     static OrdersType getTombstoneKey() {
2423       OrdersType V;
2424       V.push_back(~2U);
2425       return V;
2426     }
2427 
2428     static unsigned getHashValue(const OrdersType &V) {
2429       return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
2430     }
2431 
2432     static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) {
2433       return LHS == RHS;
2434     }
2435   };
2436 
2437   /// Contains orders of operations along with the number of bundles that have
2438   /// operations in this order. It stores only those orders that require
2439   /// reordering, if reordering is not required it is counted using \a
2440   /// NumOpsWantToKeepOriginalOrder.
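  /// For illustration: if three bundles want the order {1, 0, 3, 2} and five
  /// bundles are already in order, this map holds {1, 0, 3, 2} -> 3 while
  /// NumOpsWantToKeepOriginalOrder is 5.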
2441   DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo> NumOpsWantToKeepOrder;
2442   /// Number of bundles that do not require reordering.
2443   unsigned NumOpsWantToKeepOriginalOrder = 0;
2444 
2445   // Analysis and block reference.
2446   Function *F;
2447   ScalarEvolution *SE;
2448   TargetTransformInfo *TTI;
2449   TargetLibraryInfo *TLI;
2450   AAResults *AA;
2451   LoopInfo *LI;
2452   DominatorTree *DT;
2453   AssumptionCache *AC;
2454   DemandedBits *DB;
2455   const DataLayout *DL;
2456   OptimizationRemarkEmitter *ORE;
2457 
2458   unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
2459   unsigned MinVecRegSize; // Set by cl::opt (default: 128).
2460 
2461   /// Instruction builder to construct the vectorized tree.
2462   IRBuilder<> Builder;
2463 
2464   /// A map of scalar integer values to the smallest bit width with which they
2465   /// can legally be represented. The values map to (width, signed) pairs,
2466   /// where "width" indicates the minimum bit width and "signed" is True if the
2467   /// value must be signed-extended, rather than zero-extended, back to its
2468   /// original width.
2469   MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
2470 };
2471 
2472 } // end namespace slpvectorizer
2473 
2474 template <> struct GraphTraits<BoUpSLP *> {
2475   using TreeEntry = BoUpSLP::TreeEntry;
2476 
2477   /// NodeRef has to be a pointer per the GraphWriter.
2478   using NodeRef = TreeEntry *;
2479 
2480   using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy;
2481 
2482   /// Add the VectorizableTree to the index iterator to be able to return
2483   /// TreeEntry pointers.
2484   struct ChildIteratorType
2485       : public iterator_adaptor_base<
2486             ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> {
2487     ContainerTy &VectorizableTree;
2488 
2489     ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W,
2490                       ContainerTy &VT)
2491         : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}
2492 
2493     NodeRef operator*() { return I->UserTE; }
2494   };
2495 
2496   static NodeRef getEntryNode(BoUpSLP &R) {
2497     return R.VectorizableTree[0].get();
2498   }
2499 
2500   static ChildIteratorType child_begin(NodeRef N) {
2501     return {N->UserTreeIndices.begin(), N->Container};
2502   }
2503 
2504   static ChildIteratorType child_end(NodeRef N) {
2505     return {N->UserTreeIndices.end(), N->Container};
2506   }
2507 
2508   /// For the node iterator we just need to turn the TreeEntry iterator into a
2509   /// TreeEntry* iterator so that it dereferences to NodeRef.
2510   class nodes_iterator {
2511     using ItTy = ContainerTy::iterator;
2512     ItTy It;
2513 
2514   public:
2515     nodes_iterator(const ItTy &It2) : It(It2) {}
2516     NodeRef operator*() { return It->get(); }
2517     nodes_iterator operator++() {
2518       ++It;
2519       return *this;
2520     }
2521     bool operator!=(const nodes_iterator &N2) const { return N2.It != It; }
2522   };
2523 
2524   static nodes_iterator nodes_begin(BoUpSLP *R) {
2525     return nodes_iterator(R->VectorizableTree.begin());
2526   }
2527 
2528   static nodes_iterator nodes_end(BoUpSLP *R) {
2529     return nodes_iterator(R->VectorizableTree.end());
2530   }
2531 
2532   static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
2533 };
2534 
2535 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
2536   using TreeEntry = BoUpSLP::TreeEntry;
2537 
2538   DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
2539 
2540   std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
2541     std::string Str;
2542     raw_string_ostream OS(Str);
2543     if (isSplat(Entry->Scalars)) {
2544       OS << "<splat> " << *Entry->Scalars[0];
2545       return Str;
2546     }
    for (auto *V : Entry->Scalars) {
2548       OS << *V;
2549       if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) {
2550             return EU.Scalar == V;
2551           }))
2552         OS << " <extract>";
2553       OS << "\n";
2554     }
2555     return Str;
2556   }
2557 
2558   static std::string getNodeAttributes(const TreeEntry *Entry,
2559                                        const BoUpSLP *) {
2560     if (Entry->State == TreeEntry::NeedToGather)
2561       return "color=red";
2562     return "";
2563   }
2564 };
2565 
2566 } // end namespace llvm
2567 
2568 BoUpSLP::~BoUpSLP() {
2569   for (const auto &Pair : DeletedInstructions) {
    // If the instruction was marked accordingly, replace its remaining uses
    // with Undefs before it is erased.
2572     if (Pair.getSecond()) {
2573       Value *Undef = UndefValue::get(Pair.getFirst()->getType());
2574       Pair.getFirst()->replaceAllUsesWith(Undef);
2575     }
2576     Pair.getFirst()->dropAllReferences();
2577   }
2578   for (const auto &Pair : DeletedInstructions) {
2579     assert(Pair.getFirst()->use_empty() &&
2580            "trying to erase instruction with users.");
2581     Pair.getFirst()->eraseFromParent();
2582   }
2583 #ifdef EXPENSIVE_CHECKS
2584   // If we could guarantee that this call is not extremely slow, we could
2585   // remove the ifdef limitation (see PR47712).
2586   assert(!verifyFunction(*F, &dbgs()));
2587 #endif
2588 }
2589 
2590 void BoUpSLP::eraseInstructions(ArrayRef<Value *> AV) {
2591   for (auto *V : AV) {
2592     if (auto *I = dyn_cast<Instruction>(V))
2593       eraseInstruction(I, /*ReplaceOpsWithUndef=*/true);
2594   };
2595 }
2596 
2597 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
2598                         ArrayRef<Value *> UserIgnoreLst) {
2599   ExtraValueToDebugLocsMap ExternallyUsedValues;
2600   buildTree(Roots, ExternallyUsedValues, UserIgnoreLst);
2601 }
2602 
2603 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
2604                         ExtraValueToDebugLocsMap &ExternallyUsedValues,
2605                         ArrayRef<Value *> UserIgnoreLst) {
2606   deleteTree();
2607   UserIgnoreList = UserIgnoreLst;
2608   if (!allSameType(Roots))
2609     return;
2610   buildTree_rec(Roots, 0, EdgeInfo());
2611 
2612   // Collect the values that we need to extract from the tree.
2613   for (auto &TEPtr : VectorizableTree) {
2614     TreeEntry *Entry = TEPtr.get();
2615 
2616     // No need to handle users of gathered values.
2617     if (Entry->State == TreeEntry::NeedToGather)
2618       continue;
2619 
2620     // For each lane:
2621     for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
2622       Value *Scalar = Entry->Scalars[Lane];
2623       int FoundLane = Entry->findLaneForValue(Scalar);
2624 
2625       // Check if the scalar is externally used as an extra arg.
2626       auto ExtI = ExternallyUsedValues.find(Scalar);
2627       if (ExtI != ExternallyUsedValues.end()) {
2628         LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane "
2629                           << Lane << " from " << *Scalar << ".\n");
2630         ExternalUses.emplace_back(Scalar, nullptr, FoundLane);
2631       }
2632       for (User *U : Scalar->users()) {
2633         LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
2634 
2635         Instruction *UserInst = dyn_cast<Instruction>(U);
2636         if (!UserInst)
2637           continue;
2638 
2639         if (isDeleted(UserInst))
2640           continue;
2641 
        // Skip in-tree scalars that become vectors.
2643         if (TreeEntry *UseEntry = getTreeEntry(U)) {
2644           Value *UseScalar = UseEntry->Scalars[0];
          // Some in-tree scalars will remain as scalars in vectorized
2646           // instructions. If that is the case, the one in Lane 0 will
2647           // be used.
2648           if (UseScalar != U ||
2649               UseEntry->State == TreeEntry::ScatterVectorize ||
2650               !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
2651             LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
2652                               << ".\n");
2653             assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state");
2654             continue;
2655           }
2656         }
2657 
2658         // Ignore users in the user ignore list.
2659         if (is_contained(UserIgnoreList, UserInst))
2660           continue;
2661 
2662         LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane "
2663                           << Lane << " from " << *Scalar << ".\n");
        ExternalUses.emplace_back(Scalar, U, FoundLane);
2665       }
2666     }
2667   }
2668 }
2669 
2670 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
2671                             const EdgeInfo &UserTreeIdx) {
2672   assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
2673 
2674   InstructionsState S = getSameOpcode(VL);
2675   if (Depth == RecursionMaxDepth) {
2676     LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
2677     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2678     return;
2679   }
2680 
  // Don't handle scalable vectors.
2682   if (S.getOpcode() == Instruction::ExtractElement &&
2683       isa<ScalableVectorType>(
2684           cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) {
2685     LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n");
2686     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2687     return;
2688   }
2689 
2690   // Don't handle vectors.
2691   if (S.OpValue->getType()->isVectorTy() &&
2692       !isa<InsertElementInst>(S.OpValue)) {
2693     LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
2694     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2695     return;
2696   }
2697 
2698   if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
2699     if (SI->getValueOperand()->getType()->isVectorTy()) {
2700       LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
2701       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2702       return;
2703     }
2704 
  // If all of the operands are identical or constant, or if we cannot
  // vectorize them (different basic block or no common opcode), gather them.
  if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode()) {
    LLVM_DEBUG(dbgs()
               << "SLP: Gathering due to constant/splat/block/opcode.\n");
2708     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2709     return;
2710   }
2711 
2712   // We now know that this is a vector of instructions of the same type from
2713   // the same block.
2714 
2715   // Don't vectorize ephemeral values.
2716   for (Value *V : VL) {
2717     if (EphValues.count(V)) {
2718       LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
2719                         << ") is ephemeral.\n");
2720       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2721       return;
2722     }
2723   }
2724 
2725   // Check if this is a duplicate of another entry.
2726   if (TreeEntry *E = getTreeEntry(S.OpValue)) {
2727     LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n");
2728     if (!E->isSame(VL)) {
2729       LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
2730       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2731       return;
2732     }
    // Record the reuse of the tree node. FIXME: currently this is only used
    // to properly draw the graph rather than for the actual vectorization.
2735     E->UserTreeIndices.push_back(UserTreeIdx);
2736     LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue
2737                       << ".\n");
2738     return;
2739   }
2740 
2741   // Check that none of the instructions in the bundle are already in the tree.
2742   for (Value *V : VL) {
2743     auto *I = dyn_cast<Instruction>(V);
2744     if (!I)
2745       continue;
2746     if (getTreeEntry(I)) {
2747       LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
2748                         << ") is already in tree.\n");
2749       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2750       return;
2751     }
2752   }
2753 
2754   // If any of the scalars is marked as a value that needs to stay scalar, then
2755   // we need to gather the scalars.
2756   // The reduction nodes (stored in UserIgnoreList) also should stay scalar.
2757   for (Value *V : VL) {
2758     if (MustGather.count(V) || is_contained(UserIgnoreList, V)) {
2759       LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
2760       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2761       return;
2762     }
2763   }
2764 
2765   // Check that all of the users of the scalars that we want to vectorize are
2766   // schedulable.
2767   auto *VL0 = cast<Instruction>(S.OpValue);
2768   BasicBlock *BB = VL0->getParent();
2769 
2770   if (!DT->isReachableFromEntry(BB)) {
2771     // Don't go into unreachable blocks. They may contain instructions with
2772     // dependency cycles which confuse the final scheduling.
2773     LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
2774     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2775     return;
2776   }
2777 
2778   // Check that every instruction appears once in this bundle.
2779   SmallVector<unsigned, 4> ReuseShuffleIndicies;
2780   SmallVector<Value *, 4> UniqueValues;
2781   DenseMap<Value *, unsigned> UniquePositions;
2782   for (Value *V : VL) {
2783     auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
2784     ReuseShuffleIndicies.emplace_back(Res.first->second);
2785     if (Res.second)
2786       UniqueValues.emplace_back(V);
2787   }
2788   size_t NumUniqueScalarValues = UniqueValues.size();
2789   if (NumUniqueScalarValues == VL.size()) {
2790     ReuseShuffleIndicies.clear();
2791   } else {
2792     LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n");
2793     if (NumUniqueScalarValues <= 1 ||
2794         !llvm::isPowerOf2_32(NumUniqueScalarValues)) {
2795       LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
2796       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
2797       return;
2798     }
2799     VL = UniqueValues;
2800   }
2801 
2802   auto &BSRef = BlocksSchedules[BB];
2803   if (!BSRef)
2804     BSRef = std::make_unique<BlockScheduling>(BB);
2805 
2806   BlockScheduling &BS = *BSRef.get();
2807 
2808   Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S);
2809   if (!Bundle) {
2810     LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
2811     assert((!BS.getScheduleData(VL0) ||
2812             !BS.getScheduleData(VL0)->isPartOfBundle()) &&
2813            "tryScheduleBundle should cancelScheduling on failure");
2814     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2815                  ReuseShuffleIndicies);
2816     return;
2817   }
2818   LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
2819 
  unsigned ShuffleOrOp =
      S.isAltShuffle() ? (unsigned)Instruction::ShuffleVector : S.getOpcode();
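  // Bundles with two interleaved opcodes (e.g. alternating fadd/fsub) are
  // dispatched to the ShuffleVector case below and later lowered as two
  // vector instructions plus a blending shuffle.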
2822   switch (ShuffleOrOp) {
2823     case Instruction::PHI: {
2824       auto *PH = cast<PHINode>(VL0);
2825 
2826       // Check for terminator values (e.g. invoke).
2827       for (Value *V : VL)
2828         for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) {
2829           Instruction *Term = dyn_cast<Instruction>(
2830               cast<PHINode>(V)->getIncomingValueForBlock(
2831                   PH->getIncomingBlock(I)));
2832           if (Term && Term->isTerminator()) {
2833             LLVM_DEBUG(dbgs()
2834                        << "SLP: Need to swizzle PHINodes (terminator use).\n");
2835             BS.cancelScheduling(VL, VL0);
2836             newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2837                          ReuseShuffleIndicies);
2838             return;
2839           }
2840         }
2841 
2842       TreeEntry *TE =
2843           newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies);
2844       LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
2845 
2846       // Keeps the reordered operands to avoid code duplication.
2847       SmallVector<ValueList, 2> OperandsVec;
2848       for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) {
2849         if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) {
2850           ValueList Operands(VL.size(), PoisonValue::get(PH->getType()));
2851           TE->setOperand(I, Operands);
2852           OperandsVec.push_back(Operands);
2853           continue;
2854         }
2855         ValueList Operands;
2856         // Prepare the operand vector.
2857         for (Value *V : VL)
2858           Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(
2859               PH->getIncomingBlock(I)));
2860         TE->setOperand(I, Operands);
2861         OperandsVec.push_back(Operands);
2862       }
2863       for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx)
2864         buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx});
2865       return;
2866     }
2867     case Instruction::ExtractValue:
2868     case Instruction::ExtractElement: {
2869       OrdersType CurrentOrder;
2870       bool Reuse = canReuseExtract(VL, VL0, CurrentOrder);
2871       if (Reuse) {
2872         LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
2873         ++NumOpsWantToKeepOriginalOrder;
2874         newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2875                      ReuseShuffleIndicies);
2876         // This is a special case, as it does not gather, but at the same time
2877         // we are not extending buildTree_rec() towards the operands.
2878         ValueList Op0;
2879         Op0.assign(VL.size(), VL0->getOperand(0));
2880         VectorizableTree.back()->setOperand(0, Op0);
2881         return;
2882       }
2883       if (!CurrentOrder.empty()) {
2884         LLVM_DEBUG({
2885           dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
2886                     "with order";
2887           for (unsigned Idx : CurrentOrder)
2888             dbgs() << " " << Idx;
2889           dbgs() << "\n";
2890         });
        // Insert the new order with an initial count of 0 if it does not
        // exist yet; otherwise the existing entry is reused. The increment
        // below then bumps its counter.
2893         newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2894                      ReuseShuffleIndicies, CurrentOrder);
2895         findRootOrder(CurrentOrder);
2896         ++NumOpsWantToKeepOrder[CurrentOrder];
2897         // This is a special case, as it does not gather, but at the same time
2898         // we are not extending buildTree_rec() towards the operands.
2899         ValueList Op0;
2900         Op0.assign(VL.size(), VL0->getOperand(0));
2901         VectorizableTree.back()->setOperand(0, Op0);
2902         return;
2903       }
2904       LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
2905       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2906                    ReuseShuffleIndicies);
2907       BS.cancelScheduling(VL, VL0);
2908       return;
2909     }
2910     case Instruction::InsertElement: {
2911       assert(ReuseShuffleIndicies.empty() && "All inserts should be unique");
2912 
2913       // Check that we have a buildvector and not a shuffle of 2 or more
2914       // different vectors.
2915       ValueSet SourceVectors;
2916       for (Value *V : VL)
2917         SourceVectors.insert(cast<Instruction>(V)->getOperand(0));
2918 
2919       if (count_if(VL, [&SourceVectors](Value *V) {
2920             return !SourceVectors.contains(V);
2921           }) >= 2) {
2922         // Found 2nd source vector - cancel.
2923         LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with "
2924                              "different source vectors.\n");
2925         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2926                      ReuseShuffleIndicies);
2927         BS.cancelScheduling(VL, VL0);
2928         return;
2929       }
2930 
2931       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx);
2932       LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n");
2933 
2934       constexpr int NumOps = 2;
2935       ValueList VectorOperands[NumOps];
2936       for (int I = 0; I < NumOps; ++I) {
2937         for (Value *V : VL)
2938           VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I));
2939 
2940         TE->setOperand(I, VectorOperands[I]);
2941       }
2942       buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, 0});
2943       return;
2944     }
2945     case Instruction::Load: {
      // Check that a vectorized load would load the same memory as a scalar
      // load. For example, we don't want to vectorize loads that are smaller
      // than 8 bits. Even for a packed struct like { i2, i2, i2, i2 }, LLVM
      // loads/stores the whole struct as an i8. If we vectorized loads/stores
      // of such a struct, we would read/write the packed bits in a way that
      // disagrees with the unvectorized version.
2952       Type *ScalarTy = VL0->getType();
2953 
2954       if (DL->getTypeSizeInBits(ScalarTy) !=
2955           DL->getTypeAllocSizeInBits(ScalarTy)) {
2956         BS.cancelScheduling(VL, VL0);
2957         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2958                      ReuseShuffleIndicies);
2959         LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
2960         return;
2961       }
2962 
2963       // Make sure all loads in the bundle are simple - we can't vectorize
2964       // atomic or volatile loads.
2965       SmallVector<Value *, 4> PointerOps(VL.size());
2966       auto POIter = PointerOps.begin();
2967       for (Value *V : VL) {
2968         auto *L = cast<LoadInst>(V);
2969         if (!L->isSimple()) {
2970           BS.cancelScheduling(VL, VL0);
2971           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2972                        ReuseShuffleIndicies);
2973           LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
2974           return;
2975         }
2976         *POIter = L->getPointerOperand();
2977         ++POIter;
2978       }
2979 
2980       OrdersType CurrentOrder;
2981       // Check the order of pointer operands.
2982       if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) {
2983         Value *Ptr0;
2984         Value *PtrN;
2985         if (CurrentOrder.empty()) {
2986           Ptr0 = PointerOps.front();
2987           PtrN = PointerOps.back();
2988         } else {
2989           Ptr0 = PointerOps[CurrentOrder.front()];
2990           PtrN = PointerOps[CurrentOrder.back()];
2991         }
        Optional<int> Diff =
            getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE);
        // Check that the sorted loads are consecutive.
        if (Diff && static_cast<unsigned>(*Diff) == VL.size() - 1) {
2996           if (CurrentOrder.empty()) {
            // Original loads are consecutive and do not require reordering.
2998             ++NumOpsWantToKeepOriginalOrder;
2999             TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
3000                                          UserTreeIdx, ReuseShuffleIndicies);
3001             TE->setOperandsInOrder();
3002             LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
3003           } else {
3004             // Need to reorder.
3005             TreeEntry *TE =
3006                 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3007                              ReuseShuffleIndicies, CurrentOrder);
3008             TE->setOperandsInOrder();
3009             LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
3010             findRootOrder(CurrentOrder);
3011             ++NumOpsWantToKeepOrder[CurrentOrder];
3012           }
3013           return;
3014         }
3015         Align CommonAlignment = cast<LoadInst>(VL0)->getAlign();
3016         for (Value *V : VL)
3017           CommonAlignment =
3018               commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign());
3019         if (TTI->isLegalMaskedGather(FixedVectorType::get(ScalarTy, VL.size()),
3020                                      CommonAlignment)) {
3021           // Vectorizing non-consecutive loads with `llvm.masked.gather`.
3022           TreeEntry *TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle,
3023                                        S, UserTreeIdx, ReuseShuffleIndicies);
3024           TE->setOperandsInOrder();
3025           buildTree_rec(PointerOps, Depth + 1, {TE, 0});
3026           LLVM_DEBUG(dbgs()
3027                      << "SLP: added a vector of non-consecutive loads.\n");
3028           return;
3029         }
3030       }
3031 
3032       LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
3033       BS.cancelScheduling(VL, VL0);
3034       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3035                    ReuseShuffleIndicies);
3036       return;
3037     }
3038     case Instruction::ZExt:
3039     case Instruction::SExt:
3040     case Instruction::FPToUI:
3041     case Instruction::FPToSI:
3042     case Instruction::FPExt:
3043     case Instruction::PtrToInt:
3044     case Instruction::IntToPtr:
3045     case Instruction::SIToFP:
3046     case Instruction::UIToFP:
3047     case Instruction::Trunc:
3048     case Instruction::FPTrunc:
3049     case Instruction::BitCast: {
3050       Type *SrcTy = VL0->getOperand(0)->getType();
3051       for (Value *V : VL) {
3052         Type *Ty = cast<Instruction>(V)->getOperand(0)->getType();
3053         if (Ty != SrcTy || !isValidElementType(Ty)) {
3054           BS.cancelScheduling(VL, VL0);
3055           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3056                        ReuseShuffleIndicies);
3057           LLVM_DEBUG(dbgs()
3058                      << "SLP: Gathering casts with different src types.\n");
3059           return;
3060         }
3061       }
3062       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3063                                    ReuseShuffleIndicies);
3064       LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");
3065 
3066       TE->setOperandsInOrder();
3067       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
3068         ValueList Operands;
3069         // Prepare the operand vector.
3070         for (Value *V : VL)
3071           Operands.push_back(cast<Instruction>(V)->getOperand(i));
3072 
3073         buildTree_rec(Operands, Depth + 1, {TE, i});
3074       }
3075       return;
3076     }
3077     case Instruction::ICmp:
3078     case Instruction::FCmp: {
3079       // Check that all of the compares have the same predicate.
3080       CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
3081       CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0);
3082       Type *ComparedTy = VL0->getOperand(0)->getType();
3083       for (Value *V : VL) {
3084         CmpInst *Cmp = cast<CmpInst>(V);
3085         if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) ||
3086             Cmp->getOperand(0)->getType() != ComparedTy) {
3087           BS.cancelScheduling(VL, VL0);
3088           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3089                        ReuseShuffleIndicies);
3090           LLVM_DEBUG(dbgs()
3091                      << "SLP: Gathering cmp with different predicate.\n");
3092           return;
3093         }
3094       }
3095 
3096       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3097                                    ReuseShuffleIndicies);
3098       LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n");
3099 
3100       ValueList Left, Right;
3101       if (cast<CmpInst>(VL0)->isCommutative()) {
3102         // Commutative predicate - collect + sort operands of the instructions
3103         // so that each side is more likely to have the same opcode.
3104         assert(P0 == SwapP0 && "Commutative Predicate mismatch");
3105         reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
3106       } else {
3107         // Collect operands - commute if it uses the swapped predicate.
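        // E.g., if P0 is sgt and a compare in the bundle uses slt, swapping
        // its operands lets every lane use the sgt predicate.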
3108         for (Value *V : VL) {
3109           auto *Cmp = cast<CmpInst>(V);
3110           Value *LHS = Cmp->getOperand(0);
3111           Value *RHS = Cmp->getOperand(1);
3112           if (Cmp->getPredicate() != P0)
3113             std::swap(LHS, RHS);
3114           Left.push_back(LHS);
3115           Right.push_back(RHS);
3116         }
3117       }
3118       TE->setOperand(0, Left);
3119       TE->setOperand(1, Right);
3120       buildTree_rec(Left, Depth + 1, {TE, 0});
3121       buildTree_rec(Right, Depth + 1, {TE, 1});
3122       return;
3123     }
3124     case Instruction::Select:
3125     case Instruction::FNeg:
3126     case Instruction::Add:
3127     case Instruction::FAdd:
3128     case Instruction::Sub:
3129     case Instruction::FSub:
3130     case Instruction::Mul:
3131     case Instruction::FMul:
3132     case Instruction::UDiv:
3133     case Instruction::SDiv:
3134     case Instruction::FDiv:
3135     case Instruction::URem:
3136     case Instruction::SRem:
3137     case Instruction::FRem:
3138     case Instruction::Shl:
3139     case Instruction::LShr:
3140     case Instruction::AShr:
3141     case Instruction::And:
3142     case Instruction::Or:
3143     case Instruction::Xor: {
3144       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3145                                    ReuseShuffleIndicies);
3146       LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n");
3147 
3148       // Sort operands of the instructions so that each side is more likely to
3149       // have the same opcode.
3150       if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
3151         ValueList Left, Right;
3152         reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
3153         TE->setOperand(0, Left);
3154         TE->setOperand(1, Right);
3155         buildTree_rec(Left, Depth + 1, {TE, 0});
3156         buildTree_rec(Right, Depth + 1, {TE, 1});
3157         return;
3158       }
3159 
3160       TE->setOperandsInOrder();
3161       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
3162         ValueList Operands;
3163         // Prepare the operand vector.
3164         for (Value *V : VL)
3165           Operands.push_back(cast<Instruction>(V)->getOperand(i));
3166 
3167         buildTree_rec(Operands, Depth + 1, {TE, i});
3168       }
3169       return;
3170     }
3171     case Instruction::GetElementPtr: {
3172       // We don't combine GEPs with complicated (nested) indexing.
3173       for (Value *V : VL) {
3174         if (cast<Instruction>(V)->getNumOperands() != 2) {
3175           LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
3176           BS.cancelScheduling(VL, VL0);
3177           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3178                        ReuseShuffleIndicies);
3179           return;
3180         }
3181       }
3182 
3183       // We can't combine several GEPs into one vector if they operate on
3184       // different types.
3185       Type *Ty0 = VL0->getOperand(0)->getType();
3186       for (Value *V : VL) {
3187         Type *CurTy = cast<Instruction>(V)->getOperand(0)->getType();
3188         if (Ty0 != CurTy) {
3189           LLVM_DEBUG(dbgs()
3190                      << "SLP: not-vectorizable GEP (different types).\n");
3191           BS.cancelScheduling(VL, VL0);
3192           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3193                        ReuseShuffleIndicies);
3194           return;
3195         }
3196       }
3197 
3198       // We don't combine GEPs with non-constant indexes.
3199       Type *Ty1 = VL0->getOperand(1)->getType();
3200       for (Value *V : VL) {
3201         auto Op = cast<Instruction>(V)->getOperand(1);
3202         if (!isa<ConstantInt>(Op) ||
3203             (Op->getType() != Ty1 &&
3204              Op->getType()->getScalarSizeInBits() >
3205                  DL->getIndexSizeInBits(
3206                      V->getType()->getPointerAddressSpace()))) {
3207           LLVM_DEBUG(dbgs()
3208                      << "SLP: not-vectorizable GEP (non-constant indexes).\n");
3209           BS.cancelScheduling(VL, VL0);
3210           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3211                        ReuseShuffleIndicies);
3212           return;
3213         }
3214       }
3215 
3216       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3217                                    ReuseShuffleIndicies);
3218       LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
3219       TE->setOperandsInOrder();
3220       for (unsigned i = 0, e = 2; i < e; ++i) {
3221         ValueList Operands;
3222         // Prepare the operand vector.
3223         for (Value *V : VL)
3224           Operands.push_back(cast<Instruction>(V)->getOperand(i));
3225 
3226         buildTree_rec(Operands, Depth + 1, {TE, i});
3227       }
3228       return;
3229     }
3230     case Instruction::Store: {
3231       // Check if the stores are consecutive or if we need to swizzle them.
3232       llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType();
3233       // Avoid types that are padded when being allocated as scalars, while
3234       // being packed together in a vector (such as i1).
3235       if (DL->getTypeSizeInBits(ScalarTy) !=
3236           DL->getTypeAllocSizeInBits(ScalarTy)) {
3237         BS.cancelScheduling(VL, VL0);
3238         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3239                      ReuseShuffleIndicies);
3240         LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n");
3241         return;
3242       }
3243       // Make sure all stores in the bundle are simple - we can't vectorize
3244       // atomic or volatile stores.
3245       SmallVector<Value *, 4> PointerOps(VL.size());
3246       ValueList Operands(VL.size());
3247       auto POIter = PointerOps.begin();
3248       auto OIter = Operands.begin();
3249       for (Value *V : VL) {
3250         auto *SI = cast<StoreInst>(V);
3251         if (!SI->isSimple()) {
3252           BS.cancelScheduling(VL, VL0);
3253           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3254                        ReuseShuffleIndicies);
3255           LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n");
3256           return;
3257         }
3258         *POIter = SI->getPointerOperand();
3259         *OIter = SI->getValueOperand();
3260         ++POIter;
3261         ++OIter;
3262       }
3263 
3264       OrdersType CurrentOrder;
3265       // Check the order of pointer operands.
3266       if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) {
3267         Value *Ptr0;
3268         Value *PtrN;
3269         if (CurrentOrder.empty()) {
3270           Ptr0 = PointerOps.front();
3271           PtrN = PointerOps.back();
3272         } else {
3273           Ptr0 = PointerOps[CurrentOrder.front()];
3274           PtrN = PointerOps[CurrentOrder.back()];
3275         }
3276         Optional<int> Dist =
3277             getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE);
3278         // Check that the sorted pointer operands are consecutive.
        if (Dist && static_cast<unsigned>(*Dist) == VL.size() - 1) {
3280           if (CurrentOrder.empty()) {
            // Original stores are consecutive and do not require reordering.
3282             ++NumOpsWantToKeepOriginalOrder;
3283             TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
3284                                          UserTreeIdx, ReuseShuffleIndicies);
3285             TE->setOperandsInOrder();
3286             buildTree_rec(Operands, Depth + 1, {TE, 0});
3287             LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
3288           } else {
3289             TreeEntry *TE =
3290                 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3291                              ReuseShuffleIndicies, CurrentOrder);
3292             TE->setOperandsInOrder();
3293             buildTree_rec(Operands, Depth + 1, {TE, 0});
3294             LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n");
3295             findRootOrder(CurrentOrder);
3296             ++NumOpsWantToKeepOrder[CurrentOrder];
3297           }
3298           return;
3299         }
3300       }
3301 
3302       BS.cancelScheduling(VL, VL0);
3303       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3304                    ReuseShuffleIndicies);
3305       LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
3306       return;
3307     }
3308     case Instruction::Call: {
3309       // Check if the calls are all to the same vectorizable intrinsic or
3310       // library function.
3311       CallInst *CI = cast<CallInst>(VL0);
3312       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3313 
3314       VFShape Shape = VFShape::get(
3315           *CI, ElementCount::getFixed(static_cast<unsigned int>(VL.size())),
3316           false /*HasGlobalPred*/);
3317       Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3318 
3319       if (!VecFunc && !isTriviallyVectorizable(ID)) {
3320         BS.cancelScheduling(VL, VL0);
3321         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3322                      ReuseShuffleIndicies);
3323         LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
3324         return;
3325       }
3326       Function *F = CI->getCalledFunction();
3327       unsigned NumArgs = CI->getNumArgOperands();
3328       SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr);
3329       for (unsigned j = 0; j != NumArgs; ++j)
3330         if (hasVectorInstrinsicScalarOpd(ID, j))
3331           ScalarArgs[j] = CI->getArgOperand(j);
3332       for (Value *V : VL) {
3333         CallInst *CI2 = dyn_cast<CallInst>(V);
3334         if (!CI2 || CI2->getCalledFunction() != F ||
3335             getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
3336             (VecFunc &&
3337              VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) ||
3338             !CI->hasIdenticalOperandBundleSchema(*CI2)) {
3339           BS.cancelScheduling(VL, VL0);
3340           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3341                        ReuseShuffleIndicies);
3342           LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V
3343                             << "\n");
3344           return;
3345         }
        // Some intrinsics have scalar arguments; these must be identical
        // across all calls in the bundle for it to be vectorizable.
3348         for (unsigned j = 0; j != NumArgs; ++j) {
3349           if (hasVectorInstrinsicScalarOpd(ID, j)) {
3350             Value *A1J = CI2->getArgOperand(j);
3351             if (ScalarArgs[j] != A1J) {
3352               BS.cancelScheduling(VL, VL0);
3353               newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3354                            ReuseShuffleIndicies);
3355               LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
3356                                 << " argument " << ScalarArgs[j] << "!=" << A1J
3357                                 << "\n");
3358               return;
3359             }
3360           }
3361         }
3362         // Verify that the bundle operands are identical between the two calls.
3363         if (CI->hasOperandBundles() &&
3364             !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
3365                         CI->op_begin() + CI->getBundleOperandsEndIndex(),
3366                         CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
3367           BS.cancelScheduling(VL, VL0);
3368           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3369                        ReuseShuffleIndicies);
3370           LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
3371                             << *CI << "!=" << *V << '\n');
3372           return;
3373         }
3374       }
3375 
3376       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3377                                    ReuseShuffleIndicies);
3378       TE->setOperandsInOrder();
3379       for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
3380         ValueList Operands;
3381         // Prepare the operand vector.
3382         for (Value *V : VL) {
3383           auto *CI2 = cast<CallInst>(V);
3384           Operands.push_back(CI2->getArgOperand(i));
3385         }
3386         buildTree_rec(Operands, Depth + 1, {TE, i});
3387       }
3388       return;
3389     }
3390     case Instruction::ShuffleVector: {
      // If this is not an alternate sequence of opcodes like add-sub
      // then do not vectorize this instruction.
3393       if (!S.isAltShuffle()) {
3394         BS.cancelScheduling(VL, VL0);
3395         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3396                      ReuseShuffleIndicies);
3397         LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
3398         return;
3399       }
3400       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3401                                    ReuseShuffleIndicies);
3402       LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
3403 
3404       // Reorder operands if reordering would enable vectorization.
3405       if (isa<BinaryOperator>(VL0)) {
3406         ValueList Left, Right;
3407         reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
3408         TE->setOperand(0, Left);
3409         TE->setOperand(1, Right);
3410         buildTree_rec(Left, Depth + 1, {TE, 0});
3411         buildTree_rec(Right, Depth + 1, {TE, 1});
3412         return;
3413       }
3414 
3415       TE->setOperandsInOrder();
3416       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
3417         ValueList Operands;
3418         // Prepare the operand vector.
3419         for (Value *V : VL)
3420           Operands.push_back(cast<Instruction>(V)->getOperand(i));
3421 
3422         buildTree_rec(Operands, Depth + 1, {TE, i});
3423       }
3424       return;
3425     }
3426     default:
3427       BS.cancelScheduling(VL, VL0);
3428       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3429                    ReuseShuffleIndicies);
3430       LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
3431       return;
3432   }
3433 }
3434 
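/// Returns the number of scalar elements \p T can be decomposed into, or 0 if
/// it cannot be mapped to a vector. E.g., the homogeneous aggregate
/// { [2 x i32], [2 x i32] } maps to 4 x i32, provided a <4 x i32> register
/// fits within the target's minimum and maximum vector register sizes.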
3435 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
3436   unsigned N = 1;
3437   Type *EltTy = T;
3438 
3439   while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) ||
3440          isa<VectorType>(EltTy)) {
3441     if (auto *ST = dyn_cast<StructType>(EltTy)) {
3442       // Check that struct is homogeneous.
3443       for (const auto *Ty : ST->elements())
3444         if (Ty != *ST->element_begin())
3445           return 0;
3446       N *= ST->getNumElements();
3447       EltTy = *ST->element_begin();
3448     } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) {
3449       N *= AT->getNumElements();
3450       EltTy = AT->getElementType();
3451     } else {
3452       auto *VT = cast<FixedVectorType>(EltTy);
3453       N *= VT->getNumElements();
3454       EltTy = VT->getElementType();
3455     }
3456   }
3457 
3458   if (!isValidElementType(EltTy))
3459     return 0;
3460   uint64_t VTSize = DL.getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N));
  if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize ||
      VTSize != DL.getTypeStoreSizeInBits(T))
3462     return 0;
3463   return N;
3464 }
3465 
3466 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
3467                               SmallVectorImpl<unsigned> &CurrentOrder) const {
3468   Instruction *E0 = cast<Instruction>(OpValue);
3469   assert(E0->getOpcode() == Instruction::ExtractElement ||
3470          E0->getOpcode() == Instruction::ExtractValue);
3471   assert(E0->getOpcode() == getSameOpcode(VL).getOpcode() && "Invalid opcode");
3472   // Check if all of the extracts come from the same vector and from the
3473   // correct offset.
3474   Value *Vec = E0->getOperand(0);
3475 
3476   CurrentOrder.clear();
3477 
  // We have to extract from a vector/aggregate with the same number of
  // elements.
3479   unsigned NElts;
3480   if (E0->getOpcode() == Instruction::ExtractValue) {
3481     const DataLayout &DL = E0->getModule()->getDataLayout();
3482     NElts = canMapToVector(Vec->getType(), DL);
3483     if (!NElts)
3484       return false;
3485     // Check if load can be rewritten as load of vector.
3486     LoadInst *LI = dyn_cast<LoadInst>(Vec);
3487     if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
3488       return false;
3489   } else {
3490     NElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
3491   }
3492 
3493   if (NElts != VL.size())
3494     return false;
3495 
3496   // Check that all of the indices extract from the correct offset.
3497   bool ShouldKeepOrder = true;
3498   unsigned E = VL.size();
3499   // Assign to all items the initial value E + 1 so we can check if the extract
3500   // instruction index was used already.
3501   // Also, later we can check that all the indices are used and we have a
3502   // consecutive access in the extract instructions, by checking that no
3503   // element of CurrentOrder still has value E + 1.
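  // E.g., extract indices {2, 0, 1} fill CurrentOrder with the inverse
  // permutation {1, 2, 0} and make the function return false, signalling
  // that a reordering shuffle is needed.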
3504   CurrentOrder.assign(E, E + 1);
3505   unsigned I = 0;
3506   for (; I < E; ++I) {
3507     auto *Inst = cast<Instruction>(VL[I]);
3508     if (Inst->getOperand(0) != Vec)
3509       break;
3510     Optional<unsigned> Idx = getExtractIndex(Inst);
3511     if (!Idx)
3512       break;
3513     const unsigned ExtIdx = *Idx;
3514     if (ExtIdx != I) {
3515       if (ExtIdx >= E || CurrentOrder[ExtIdx] != E + 1)
3516         break;
3517       ShouldKeepOrder = false;
3518       CurrentOrder[ExtIdx] = I;
3519     } else {
3520       if (CurrentOrder[I] != E + 1)
3521         break;
3522       CurrentOrder[I] = I;
3523     }
3524   }
3525   if (I < E) {
3526     CurrentOrder.clear();
3527     return false;
3528   }
3529 
3530   return ShouldKeepOrder;
3531 }
3532 
3533 bool BoUpSLP::areAllUsersVectorized(Instruction *I,
3534                                     ArrayRef<Value *> VectorizedVals) const {
3535   return (I->hasOneUse() && is_contained(VectorizedVals, I)) ||
3536          llvm::all_of(I->users(), [this](User *U) {
3537            return ScalarToTreeEntry.count(U) > 0;
3538          });
3539 }
3540 
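/// Compute the cost of vectorizing call \p CI both as a target intrinsic and
/// as a vector library call, returning the pair {intrinsic cost, library
/// cost} so the caller can pick the cheaper lowering.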
3541 static std::pair<InstructionCost, InstructionCost>
3542 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy,
3543                    TargetTransformInfo *TTI, TargetLibraryInfo *TLI) {
3544   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3545 
3546   // Calculate the cost of the scalar and vector calls.
3547   SmallVector<Type *, 4> VecTys;
3548   for (Use &Arg : CI->args())
3549     VecTys.push_back(
3550         FixedVectorType::get(Arg->getType(), VecTy->getNumElements()));
3551   FastMathFlags FMF;
3552   if (auto *FPCI = dyn_cast<FPMathOperator>(CI))
3553     FMF = FPCI->getFastMathFlags();
3554   SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end());
3555   IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF,
3556                                     dyn_cast<IntrinsicInst>(CI));
3557   auto IntrinsicCost =
3558     TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput);
3559 
3560   auto Shape = VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
3561                                      VecTy->getNumElements())),
3562                             false /*HasGlobalPred*/);
3563   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3564   auto LibCost = IntrinsicCost;
3565   if (!CI->isNoBuiltin() && VecFunc) {
3566     // Calculate the cost of the vector library call.
3567     // If the corresponding vector call is cheaper, return its cost.
3568     LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys,
3569                                     TTI::TCK_RecipThroughput);
3570   }
3571   return {IntrinsicCost, LibCost};
3572 }
3573 
3574 /// Compute the cost of creating a vector of type \p VecTy containing the
3575 /// extracted values from \p VL.
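/// E.g., when costing a single-source permutation of a <8 x float> VecTy on
/// a target whose registers hold four floats, extracts are examined in
/// blocks of four: a block reading lanes {0, 1, 2, 3} of one source register
/// adds no cost, while a non-consecutive block adds one single-source
/// permutation of a <4 x float>.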
3576 static InstructionCost
3577 computeExtractCost(ArrayRef<Value *> VL, FixedVectorType *VecTy,
3578                    TargetTransformInfo::ShuffleKind ShuffleKind,
3579                    ArrayRef<int> Mask, TargetTransformInfo &TTI) {
3580   unsigned NumOfParts = TTI.getNumberOfParts(VecTy);
3581 
3582   if (ShuffleKind != TargetTransformInfo::SK_PermuteSingleSrc || !NumOfParts ||
3583       VecTy->getNumElements() < NumOfParts)
3584     return TTI.getShuffleCost(ShuffleKind, VecTy, Mask);
3585 
3586   bool AllConsecutive = true;
3587   unsigned EltsPerVector = VecTy->getNumElements() / NumOfParts;
3588   unsigned Idx = -1;
3589   InstructionCost Cost = 0;
3590 
3591   // Process extracts in blocks of EltsPerVector to check if the source vector
3592   // operand can be re-used directly. If not, add the cost of creating a shuffle
3593   // to extract the values into a vector register.
3594   for (auto *V : VL) {
3595     ++Idx;
3596 
    // Reached the start of a new vector register.
3598     if (Idx % EltsPerVector == 0) {
3599       AllConsecutive = true;
3600       continue;
3601     }
3602 
    // Check whether all extracts filling the current vector register on the
    // target read their values directly and in consecutive order.
3605     unsigned CurrentIdx = *getExtractIndex(cast<Instruction>(V));
3606     unsigned PrevIdx = *getExtractIndex(cast<Instruction>(VL[Idx - 1]));
3607     AllConsecutive &= PrevIdx + 1 == CurrentIdx &&
3608                       CurrentIdx % EltsPerVector == Idx % EltsPerVector;
3609 
3610     if (AllConsecutive)
3611       continue;
3612 
3613     // Skip all indices, except for the last index per vector block.
3614     if ((Idx + 1) % EltsPerVector != 0 && Idx + 1 != VL.size())
3615       continue;
3616 
    // If we have a series of extracts which are not consecutive and hence
    // cannot re-use the source vector register directly, compute the shuffle
    // cost to extract a vector with EltsPerVector elements.
3620     Cost += TTI.getShuffleCost(
3621         TargetTransformInfo::SK_PermuteSingleSrc,
3622         FixedVectorType::get(VecTy->getElementType(), EltsPerVector));
3623   }
3624   return Cost;
3625 }
3626 
3627 /// Shuffles \p Mask in accordance with the given \p SubMask.
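/// Ignoring undef sentinels, the result is the composition of the two masks,
/// Mask'[I] == Mask[SubMask[I]]; e.g. composing Mask = {1, 0, 2} with
/// SubMask = {2, 0, 1} yields {2, 1, 0}.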
3628 static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask) {
3629   if (SubMask.empty())
3630     return;
3631   if (Mask.empty()) {
3632     Mask.append(SubMask.begin(), SubMask.end());
3633     return;
3634   }
3635   SmallVector<int, 4> NewMask(SubMask.size(), SubMask.size());
3636   int TermValue = std::min(Mask.size(), SubMask.size());
3637   for (int I = 0, E = SubMask.size(); I < E; ++I) {
3638     if (SubMask[I] >= TermValue || SubMask[I] == UndefMaskElem ||
3639         Mask[SubMask[I]] >= TermValue) {
3640       NewMask[I] = UndefMaskElem;
3641       continue;
3642     }
3643     NewMask[I] = Mask[SubMask[I]];
3644   }
3645   Mask.swap(NewMask);
3646 }
3647 
3648 InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
3649                                       ArrayRef<Value *> VectorizedVals) {
3650   ArrayRef<Value*> VL = E->Scalars;
3651 
3652   Type *ScalarTy = VL[0]->getType();
3653   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
3654     ScalarTy = SI->getValueOperand()->getType();
3655   else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0]))
3656     ScalarTy = CI->getOperand(0)->getType();
3657   else if (auto *IE = dyn_cast<InsertElementInst>(VL[0]))
3658     ScalarTy = IE->getOperand(1)->getType();
3659   auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
3660   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
3661 
3662   // If we have computed a smaller type for the expression, update VecTy so
3663   // that the costs will be accurate.
3664   if (MinBWs.count(VL[0]))
3665     VecTy = FixedVectorType::get(
3666         IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size());
3667   auto *FinalVecTy = VecTy;
3668 
3669   unsigned ReuseShuffleNumbers = E->ReuseShuffleIndices.size();
3670   bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
3671   if (NeedToShuffleReuses)
3672     FinalVecTy =
3673         FixedVectorType::get(VecTy->getElementType(), ReuseShuffleNumbers);
3674   // FIXME: it tries to fix a problem with MSVC buildbots.
3675   TargetTransformInfo &TTIRef = *TTI;
3676   auto &&AdjustExtractsCost = [this, &TTIRef, CostKind, VL, VecTy,
3677                                VectorizedVals](InstructionCost &Cost,
3678                                                bool IsGather) {
3679     DenseMap<Value *, int> ExtractVectorsTys;
3680     for (auto *V : VL) {
3681       // If all users of instruction are going to be vectorized and this
3682       // instruction itself is not going to be vectorized, consider this
3683       // instruction as dead and remove its cost from the final cost of the
3684       // vectorized tree.
3685       if (!areAllUsersVectorized(cast<Instruction>(V), VectorizedVals) ||
3686           (IsGather && ScalarToTreeEntry.count(V)))
3687         continue;
3688       auto *EE = cast<ExtractElementInst>(V);
3689       unsigned Idx = *getExtractIndex(EE);
3690       if (TTIRef.getNumberOfParts(VecTy) !=
3691           TTIRef.getNumberOfParts(EE->getVectorOperandType())) {
3692         auto It =
3693             ExtractVectorsTys.try_emplace(EE->getVectorOperand(), Idx).first;
3694         It->getSecond() = std::min<int>(It->second, Idx);
3695       }
3696       // Take credit for instruction that will become dead.
3697       if (EE->hasOneUse()) {
3698         Instruction *Ext = EE->user_back();
3699         if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
3700             all_of(Ext->users(),
3701                    [](User *U) { return isa<GetElementPtrInst>(U); })) {
3702           // Use getExtractWithExtendCost() to calculate the cost of
3703           // extractelement/ext pair.
3704           Cost -=
3705               TTIRef.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(),
3706                                               EE->getVectorOperandType(), Idx);
3707           // Add back the cost of s|zext which is subtracted separately.
3708           Cost += TTIRef.getCastInstrCost(
3709               Ext->getOpcode(), Ext->getType(), EE->getType(),
3710               TTI::getCastContextHint(Ext), CostKind, Ext);
3711           continue;
3712         }
3713       }
3714       Cost -= TTIRef.getVectorInstrCost(Instruction::ExtractElement,
3715                                         EE->getVectorOperandType(), Idx);
3716     }
3717     // Add a cost for subvector extracts/inserts if required.
3718     for (const auto &Data : ExtractVectorsTys) {
3719       auto *EEVTy = cast<FixedVectorType>(Data.first->getType());
3720       unsigned NumElts = VecTy->getNumElements();
3721       if (TTIRef.getNumberOfParts(EEVTy) > TTIRef.getNumberOfParts(VecTy)) {
3722         unsigned Idx = (Data.second / NumElts) * NumElts;
3723         unsigned EENumElts = EEVTy->getNumElements();
3724         if (Idx + NumElts <= EENumElts) {
3725           Cost +=
3726               TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
3727                                     EEVTy, None, Idx, VecTy);
3728         } else {
3729           // Need to round up the subvector type vectorization factor to avoid a
3730           // crash in cost model functions. Make SubVT so that Idx + VF of SubVT
3731           // <= EENumElts.
3732           auto *SubVT =
3733               FixedVectorType::get(VecTy->getElementType(), EENumElts - Idx);
3734           Cost +=
3735               TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
3736                                     EEVTy, None, Idx, SubVT);
3737         }
3738       } else {
3739         Cost += TTIRef.getShuffleCost(TargetTransformInfo::SK_InsertSubvector,
3740                                       VecTy, None, 0, EEVTy);
3741       }
3742     }
3743   };
3744   if (E->State == TreeEntry::NeedToGather) {
3745     if (allConstant(VL))
3746       return 0;
3747     if (isa<InsertElementInst>(VL[0]))
3748       return InstructionCost::getInvalid();
3749     SmallVector<int> Mask;
3750     SmallVector<const TreeEntry *> Entries;
3751     Optional<TargetTransformInfo::ShuffleKind> Shuffle =
3752         isGatherShuffledEntry(E, Mask, Entries);
3753     if (Shuffle.hasValue()) {
3754       InstructionCost GatherCost = 0;
3755       if (ShuffleVectorInst::isIdentityMask(Mask)) {
3756         // Perfect match in the graph, will reuse the previously vectorized
3757         // node. Cost is 0.
3758         LLVM_DEBUG(
3759             dbgs()
3760             << "SLP: perfect diamond match for gather bundle that starts with "
3761             << *VL.front() << ".\n");
3762         if (NeedToShuffleReuses)
3763           GatherCost =
3764               TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
3765                                   FinalVecTy, E->ReuseShuffleIndices);
3766       } else {
3767         LLVM_DEBUG(dbgs() << "SLP: shuffled " << Entries.size()
3768                           << " entries for bundle that starts with "
3769                           << *VL.front() << ".\n");
        // Detected that, instead of a gather, we can emit a shuffle of one or
        // two previously vectorized nodes. Add the cost of the permutation
        // rather than the gather.
3773         ::addMask(Mask, E->ReuseShuffleIndices);
3774         GatherCost = TTI->getShuffleCost(*Shuffle, FinalVecTy, Mask);
3775       }
3776       return GatherCost;
3777     }
3778     if (isSplat(VL)) {
      // Found a broadcast of a single scalar; calculate the cost as a
      // broadcast shuffle.
3781       return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy);
3782     }
3783     if (E->getOpcode() == Instruction::ExtractElement && allSameType(VL) &&
3784         allSameBlock(VL) &&
3785         !isa<ScalableVectorType>(
3786             cast<ExtractElementInst>(E->getMainOp())->getVectorOperandType())) {
      // Check that the gather of extractelements can be represented as just a
      // shuffle of one or two of the vectors the scalars are extracted from.
3789       SmallVector<int> Mask;
3790       Optional<TargetTransformInfo::ShuffleKind> ShuffleKind =
3791           isShuffle(VL, Mask);
3792       if (ShuffleKind.hasValue()) {
        // Found the group of extractelement instructions that must be
        // gathered into a vector and can be represented as a permutation of
        // the elements of a single input vector or of 2 input vectors.
3796         InstructionCost Cost =
3797             computeExtractCost(VL, VecTy, *ShuffleKind, Mask, *TTI);
3798         AdjustExtractsCost(Cost, /*IsGather=*/true);
3799         if (NeedToShuffleReuses)
3800           Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
3801                                       FinalVecTy, E->ReuseShuffleIndices);
3802         return Cost;
3803       }
3804     }
3805     InstructionCost ReuseShuffleCost = 0;
3806     if (NeedToShuffleReuses)
3807       ReuseShuffleCost = TTI->getShuffleCost(
3808           TTI::SK_PermuteSingleSrc, FinalVecTy, E->ReuseShuffleIndices);
3809     return ReuseShuffleCost + getGatherCost(VL);
3810   }
3811   InstructionCost CommonCost = 0;
3812   SmallVector<int> Mask;
3813   if (!E->ReorderIndices.empty()) {
3814     SmallVector<int> NewMask;
3815     if (E->getOpcode() == Instruction::Store) {
3816       // For stores the order is actually a mask.
3817       NewMask.resize(E->ReorderIndices.size());
3818       copy(E->ReorderIndices, NewMask.begin());
3819     } else {
3820       inversePermutation(E->ReorderIndices, NewMask);
3821     }
3822     ::addMask(Mask, NewMask);
3823   }
3824   if (NeedToShuffleReuses)
3825     ::addMask(Mask, E->ReuseShuffleIndices);
3826   if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask))
3827     CommonCost =
3828         TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask);
3829   assert((E->State == TreeEntry::Vectorize ||
3830           E->State == TreeEntry::ScatterVectorize) &&
3831          "Unhandled state");
3832   assert(E->getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
3833   Instruction *VL0 = E->getMainOp();
3834   unsigned ShuffleOrOp =
3835       E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
3836   switch (ShuffleOrOp) {
3837     case Instruction::PHI:
3838       return 0;
3839 
3840     case Instruction::ExtractValue:
3841     case Instruction::ExtractElement: {
      // The common cost of removing the ExtractElement/ExtractValue
      // instructions plus the cost of the shuffles required to reshuffle the
      // original vector.
3844       if (NeedToShuffleReuses) {
3845         unsigned Idx = 0;
3846         for (unsigned I : E->ReuseShuffleIndices) {
3847           if (ShuffleOrOp == Instruction::ExtractElement) {
3848             auto *EE = cast<ExtractElementInst>(VL[I]);
3849             CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
3850                                                   EE->getVectorOperandType(),
3851                                                   *getExtractIndex(EE));
3852           } else {
3853             CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
3854                                                   VecTy, Idx);
3855             ++Idx;
3856           }
3857         }
3858         Idx = ReuseShuffleNumbers;
3859         for (Value *V : VL) {
3860           if (ShuffleOrOp == Instruction::ExtractElement) {
3861             auto *EE = cast<ExtractElementInst>(V);
3862             CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
3863                                                   EE->getVectorOperandType(),
3864                                                   *getExtractIndex(EE));
3865           } else {
3866             --Idx;
3867             CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
3868                                                   VecTy, Idx);
3869           }
3870         }
3871       }
3872       if (ShuffleOrOp == Instruction::ExtractValue) {
3873         for (unsigned I = 0, E = VL.size(); I < E; ++I) {
3874           auto *EI = cast<Instruction>(VL[I]);
3875           // Take credit for the instruction that will become dead.
3876           if (EI->hasOneUse()) {
3877             Instruction *Ext = EI->user_back();
3878             if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
3879                 all_of(Ext->users(),
3880                        [](User *U) { return isa<GetElementPtrInst>(U); })) {
3881               // Use getExtractWithExtendCost() to calculate the cost of
3882               // extractelement/ext pair.
3883               CommonCost -= TTI->getExtractWithExtendCost(
3884                   Ext->getOpcode(), Ext->getType(), VecTy, I);
3885               // Add back the cost of s|zext which is subtracted separately.
3886               CommonCost += TTI->getCastInstrCost(
3887                   Ext->getOpcode(), Ext->getType(), EI->getType(),
3888                   TTI::getCastContextHint(Ext), CostKind, Ext);
3889               continue;
3890             }
3891           }
3892           CommonCost -=
3893               TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, I);
3894         }
3895       } else {
3896         AdjustExtractsCost(CommonCost, /*IsGather=*/false);
3897       }
3898       return CommonCost;
3899     }
3900     case Instruction::InsertElement: {
3901       auto *SrcVecTy = cast<FixedVectorType>(VL0->getType());
3902 
3903       unsigned const NumElts = SrcVecTy->getNumElements();
3904       unsigned const NumScalars = VL.size();
3905       APInt DemandedElts = APInt::getNullValue(NumElts);
3906       // TODO: Add support for Instruction::InsertValue.
3907       unsigned Offset = UINT_MAX;
3908       bool IsIdentity = true;
3909       SmallVector<int> ShuffleMask(NumElts, UndefMaskElem);
3910       for (unsigned I = 0; I < NumScalars; ++I) {
3911         Optional<int> InsertIdx = getInsertIndex(VL[I], 0);
3912         if (!InsertIdx || *InsertIdx == UndefMaskElem)
3913           continue;
3914         unsigned Idx = *InsertIdx;
3915         DemandedElts.setBit(Idx);
3916         if (Idx < Offset) {
3917           Offset = Idx;
3918           IsIdentity &= I == 0;
3919         } else {
3920           assert(Idx >= Offset && "Failed to find vector index offset");
3921           IsIdentity &= Idx - Offset == I;
3922         }
3923         ShuffleMask[Idx] = I;
3924       }
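           // For illustration (a hypothetical bundle): if VL holds 4 inserts into an
           // <8 x float> vector at indices 2, 3, 4 and 5 (in bundle order), then
           // Offset == 2, IsIdentity stays true, DemandedElts has bits 2..5 set and
           // ShuffleMask == {-1, -1, 0, 1, 2, 3, -1, -1} (UndefMaskElem == -1).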
3925       assert(Offset < NumElts && "Failed to find vector index offset");
3926 
3927       InstructionCost Cost = 0;
3928       Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts,
3929                                             /*Insert*/ true, /*Extract*/ false);
3930 
3931       if (IsIdentity && NumElts != NumScalars && Offset % NumScalars != 0) {
3932         // FIXME: Replace with SK_InsertSubvector once it is properly supported.
3933         unsigned Sz = PowerOf2Ceil(Offset + NumScalars);
3934         Cost += TTI->getShuffleCost(
3935             TargetTransformInfo::SK_PermuteSingleSrc,
3936             FixedVectorType::get(SrcVecTy->getElementType(), Sz));
3937       } else if (!IsIdentity) {
3938         Cost += TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, SrcVecTy,
3939                                     ShuffleMask);
3940       }
3941 
3942       return Cost;
3943     }
3944     case Instruction::ZExt:
3945     case Instruction::SExt:
3946     case Instruction::FPToUI:
3947     case Instruction::FPToSI:
3948     case Instruction::FPExt:
3949     case Instruction::PtrToInt:
3950     case Instruction::IntToPtr:
3951     case Instruction::SIToFP:
3952     case Instruction::UIToFP:
3953     case Instruction::Trunc:
3954     case Instruction::FPTrunc:
3955     case Instruction::BitCast: {
3956       Type *SrcTy = VL0->getOperand(0)->getType();
3957       InstructionCost ScalarEltCost =
3958           TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy,
3959                                 TTI::getCastContextHint(VL0), CostKind, VL0);
3960       if (NeedToShuffleReuses) {
3961         CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3962       }
3963 
3964       // Calculate the cost of this instruction.
3965       InstructionCost ScalarCost = VL.size() * ScalarEltCost;
3966 
3967       auto *SrcVecTy = FixedVectorType::get(SrcTy, VL.size());
3968       InstructionCost VecCost = 0;
3969       // Check if the values are candidates to demote.
3970       if (!MinBWs.count(VL0) || VecTy != SrcVecTy) {
3971         VecCost = CommonCost + TTI->getCastInstrCost(
3972                                    E->getOpcode(), VecTy, SrcVecTy,
3973                                    TTI::getCastContextHint(VL0), CostKind, VL0);
3974       }
3975       LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
3976       return VecCost - ScalarCost;
3977     }
3978     case Instruction::FCmp:
3979     case Instruction::ICmp:
3980     case Instruction::Select: {
3981       // Calculate the cost of this instruction.
3982       InstructionCost ScalarEltCost =
3983           TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, Builder.getInt1Ty(),
3984                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, VL0);
3985       if (NeedToShuffleReuses) {
3986         CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
3987       }
3988       auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size());
3989       InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
3990 
3991       // Check if all entries in VL are either compares or selects with compares
3992       // as condition that have the same predicates.
3993       CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE;
3994       bool First = true;
3995       for (auto *V : VL) {
3996         CmpInst::Predicate CurrentPred;
3997         auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value());
3998         if ((!match(V, m_Select(MatchCmp, m_Value(), m_Value())) &&
3999              !match(V, MatchCmp)) ||
4000             (!First && VecPred != CurrentPred)) {
4001           VecPred = CmpInst::BAD_ICMP_PREDICATE;
4002           break;
4003         }
4004         First = false;
4005         VecPred = CurrentPred;
4006       }
4007 
4008       InstructionCost VecCost = TTI->getCmpSelInstrCost(
4009           E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0);
4010       // Check if it is possible and profitable to use min/max for selects in
4011       // VL.
4012       //
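           // E.g., scalars of the form 'select (icmp slt %a, %b), %a, %b' may be
           // costed as the smin intrinsic instead of a vector icmp + select pair.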
4013       auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL);
4014       if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) {
4015         IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy,
4016                                           {VecTy, VecTy});
4017         InstructionCost IntrinsicCost =
4018             TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
4019         // If the selects are the only uses of the compares, they will be dead
4020         // and we can adjust the cost by removing their cost.
4021         if (IntrinsicAndUse.second)
4022           IntrinsicCost -=
4023               TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy, MaskTy,
4024                                       CmpInst::BAD_ICMP_PREDICATE, CostKind);
4025         VecCost = std::min(VecCost, IntrinsicCost);
4026       }
4027       LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
4028       return CommonCost + VecCost - ScalarCost;
4029     }
4030     case Instruction::FNeg:
4031     case Instruction::Add:
4032     case Instruction::FAdd:
4033     case Instruction::Sub:
4034     case Instruction::FSub:
4035     case Instruction::Mul:
4036     case Instruction::FMul:
4037     case Instruction::UDiv:
4038     case Instruction::SDiv:
4039     case Instruction::FDiv:
4040     case Instruction::URem:
4041     case Instruction::SRem:
4042     case Instruction::FRem:
4043     case Instruction::Shl:
4044     case Instruction::LShr:
4045     case Instruction::AShr:
4046     case Instruction::And:
4047     case Instruction::Or:
4048     case Instruction::Xor: {
4049       // Certain instructions can be cheaper to vectorize if they have a
4050       // constant second vector operand.
4051       TargetTransformInfo::OperandValueKind Op1VK =
4052           TargetTransformInfo::OK_AnyValue;
4053       TargetTransformInfo::OperandValueKind Op2VK =
4054           TargetTransformInfo::OK_UniformConstantValue;
4055       TargetTransformInfo::OperandValueProperties Op1VP =
4056           TargetTransformInfo::OP_None;
4057       TargetTransformInfo::OperandValueProperties Op2VP =
4058           TargetTransformInfo::OP_PowerOf2;
4059 
4060       // If all operands are exactly the same ConstantInt then set the
4061       // operand kind to OK_UniformConstantValue.
4062       // If instead not all operands are constants, then set the operand kind
4063       // to OK_AnyValue. If all operands are constants but not the same,
4064       // then set the operand kind to OK_NonUniformConstantValue.
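           // E.g., if every scalar is 'shl %x, 4', the second operand stays
           // OK_UniformConstantValue with OP_PowerOf2; mixing 'shl %x, 4' and
           // 'shl %y, 8' demotes the kind to OK_NonUniformConstantValue, while
           // OP_PowerOf2 is kept because both shift amounts are powers of 2.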
4065       ConstantInt *CInt0 = nullptr;
4066       for (unsigned i = 0, e = VL.size(); i < e; ++i) {
4067         const Instruction *I = cast<Instruction>(VL[i]);
4068         unsigned OpIdx = isa<BinaryOperator>(I) ? 1 : 0;
4069         ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx));
4070         if (!CInt) {
4071           Op2VK = TargetTransformInfo::OK_AnyValue;
4072           Op2VP = TargetTransformInfo::OP_None;
4073           break;
4074         }
4075         if (Op2VP == TargetTransformInfo::OP_PowerOf2 &&
4076             !CInt->getValue().isPowerOf2())
4077           Op2VP = TargetTransformInfo::OP_None;
4078         if (i == 0) {
4079           CInt0 = CInt;
4080           continue;
4081         }
4082         if (CInt0 != CInt)
4083           Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
4084       }
4085 
4086       SmallVector<const Value *, 4> Operands(VL0->operand_values());
4087       InstructionCost ScalarEltCost =
4088           TTI->getArithmeticInstrCost(E->getOpcode(), ScalarTy, CostKind, Op1VK,
4089                                       Op2VK, Op1VP, Op2VP, Operands, VL0);
4090       if (NeedToShuffleReuses) {
4091         CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
4092       }
4093       InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
4094       InstructionCost VecCost =
4095           TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind, Op1VK,
4096                                       Op2VK, Op1VP, Op2VP, Operands, VL0);
4097       LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
4098       return CommonCost + VecCost - ScalarCost;
4099     }
4100     case Instruction::GetElementPtr: {
4101       TargetTransformInfo::OperandValueKind Op1VK =
4102           TargetTransformInfo::OK_AnyValue;
4103       TargetTransformInfo::OperandValueKind Op2VK =
4104           TargetTransformInfo::OK_UniformConstantValue;
4105 
4106       InstructionCost ScalarEltCost = TTI->getArithmeticInstrCost(
4107           Instruction::Add, ScalarTy, CostKind, Op1VK, Op2VK);
4108       if (NeedToShuffleReuses) {
4109         CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
4110       }
4111       InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
4112       InstructionCost VecCost = TTI->getArithmeticInstrCost(
4113           Instruction::Add, VecTy, CostKind, Op1VK, Op2VK);
4114       LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
4115       return CommonCost + VecCost - ScalarCost;
4116     }
4117     case Instruction::Load: {
4118       // Cost of wide load - cost of scalar loads.
4119       Align Alignment = cast<LoadInst>(VL0)->getAlign();
4120       InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
4121           Instruction::Load, ScalarTy, Alignment, 0, CostKind, VL0);
4122       if (NeedToShuffleReuses) {
4123         CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
4124       }
4125       InstructionCost ScalarLdCost = VecTy->getNumElements() * ScalarEltCost;
4126       InstructionCost VecLdCost;
4127       if (E->State == TreeEntry::Vectorize) {
4128         VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, Alignment, 0,
4129                                          CostKind, VL0);
4130       } else {
4131         assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState");
4132         Align CommonAlignment = Alignment;
4133         for (Value *V : VL)
4134           CommonAlignment =
4135               commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign());
4136         VecLdCost = TTI->getGatherScatterOpCost(
4137             Instruction::Load, VecTy, cast<LoadInst>(VL0)->getPointerOperand(),
4138             /*VariableMask=*/false, CommonAlignment, CostKind, VL0);
4139       }
4140       LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecLdCost, ScalarLdCost));
4141       return CommonCost + VecLdCost - ScalarLdCost;
4142     }
4143     case Instruction::Store: {
4144       // We know that we can merge the stores. Calculate the cost.
4145       bool IsReorder = !E->ReorderIndices.empty();
4146       auto *SI =
4147           cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0);
4148       Align Alignment = SI->getAlign();
4149       InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
4150           Instruction::Store, ScalarTy, Alignment, 0, CostKind, VL0);
4151       InstructionCost ScalarStCost = VecTy->getNumElements() * ScalarEltCost;
4152       InstructionCost VecStCost = TTI->getMemoryOpCost(
4153           Instruction::Store, VecTy, Alignment, 0, CostKind, VL0);
4154       LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecStCost, ScalarStCost));
4155       return CommonCost + VecStCost - ScalarStCost;
4156     }
4157     case Instruction::Call: {
4158       CallInst *CI = cast<CallInst>(VL0);
4159       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4160 
4161       // Calculate the cost of the scalar and vector calls.
4162       IntrinsicCostAttributes CostAttrs(ID, *CI, 1);
4163       InstructionCost ScalarEltCost =
4164           TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
4165       if (NeedToShuffleReuses) {
4166         CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
4167       }
4168       InstructionCost ScalarCallCost = VecTy->getNumElements() * ScalarEltCost;
4169 
4170       auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
4171       InstructionCost VecCallCost =
4172           std::min(VecCallCosts.first, VecCallCosts.second);
4173 
4174       LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
4175                         << " (" << VecCallCost << "-" << ScalarCallCost << ")"
4176                         << " for " << *CI << "\n");
4177 
4178       return CommonCost + VecCallCost - ScalarCallCost;
4179     }
4180     case Instruction::ShuffleVector: {
4181       assert(E->isAltShuffle() &&
4182              ((Instruction::isBinaryOp(E->getOpcode()) &&
4183                Instruction::isBinaryOp(E->getAltOpcode())) ||
4184               (Instruction::isCast(E->getOpcode()) &&
4185                Instruction::isCast(E->getAltOpcode()))) &&
4186              "Invalid Shuffle Vector Operand");
4187       InstructionCost ScalarCost = 0;
4188       if (NeedToShuffleReuses) {
4189         for (unsigned Idx : E->ReuseShuffleIndices) {
4190           Instruction *I = cast<Instruction>(VL[Idx]);
4191           CommonCost -= TTI->getInstructionCost(I, CostKind);
4192         }
4193         for (Value *V : VL) {
4194           Instruction *I = cast<Instruction>(V);
4195           CommonCost += TTI->getInstructionCost(I, CostKind);
4196         }
4197       }
4198       for (Value *V : VL) {
4199         Instruction *I = cast<Instruction>(V);
4200         assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
4201         ScalarCost += TTI->getInstructionCost(I, CostKind);
4202       }
4203       // VecCost is the sum of the cost of creating the two source vectors and
4204       // the cost of the blending shuffle.
4205       InstructionCost VecCost = 0;
4206       if (Instruction::isBinaryOp(E->getOpcode())) {
4207         VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind);
4208         VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy,
4209                                                CostKind);
4210       } else {
4211         Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType();
4212         Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType();
4213         auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size());
4214         auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size());
4215         VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty,
4216                                         TTI::CastContextHint::None, CostKind);
4217         VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty,
4218                                          TTI::CastContextHint::None, CostKind);
4219       }
4220 
4221       SmallVector<int> Mask(E->Scalars.size());
4222       for (unsigned I = 0, End = E->Scalars.size(); I < End; ++I) {
4223         auto *OpInst = cast<Instruction>(E->Scalars[I]);
4224         assert(E->isOpcodeOrAlt(OpInst) && "Unexpected main/alternate opcode");
4225         Mask[I] = I + (OpInst->getOpcode() == E->getAltOpcode() ? End : 0);
4226       }
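           // E.g., for 4 scalars alternating between the main and the alternate
           // opcode (say add/sub), the blend mask is {0, 5, 2, 7}: even lanes take
           // lanes 0 and 2 of the add vector, odd lanes take lanes 1 and 3 of the
           // sub vector.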
4227       VecCost +=
4228           TTI->getShuffleCost(TargetTransformInfo::SK_Select, VecTy, Mask, 0);
4229       LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
4230       return CommonCost + VecCost - ScalarCost;
4231     }
4232     default:
4233       llvm_unreachable("Unknown instruction");
4234   }
4235 }
4236 
4237 bool BoUpSLP::isFullyVectorizableTinyTree() const {
4238   LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
4239                     << VectorizableTree.size() << " is fully vectorizable.\n");
4240 
4241   // We only handle trees of heights 1 and 2.
4242   if (VectorizableTree.size() == 1 &&
4243       VectorizableTree[0]->State == TreeEntry::Vectorize)
4244     return true;
4245 
4246   if (VectorizableTree.size() != 2)
4247     return false;
4248 
4249   // Handle splat and all-constants stores. Also try to vectorize tiny trees
4250   // whose second node is a gather that either has fewer scalar operands than
4251   // the initial tree entry (it may be profitable to shuffle the second gather)
4252   // or consists of extractelements that form a shuffle.
4253   SmallVector<int> Mask;
4254   if (VectorizableTree[0]->State == TreeEntry::Vectorize &&
4255       (allConstant(VectorizableTree[1]->Scalars) ||
4256        isSplat(VectorizableTree[1]->Scalars) ||
4257        (VectorizableTree[1]->State == TreeEntry::NeedToGather &&
4258         VectorizableTree[1]->Scalars.size() <
4259             VectorizableTree[0]->Scalars.size()) ||
4260        (VectorizableTree[1]->State == TreeEntry::NeedToGather &&
4261         VectorizableTree[1]->getOpcode() == Instruction::ExtractElement &&
4262         isShuffle(VectorizableTree[1]->Scalars, Mask))))
4263     return true;
4264 
4265   // Gathering cost would be too much for tiny trees.
4266   if (VectorizableTree[0]->State == TreeEntry::NeedToGather ||
4267       VectorizableTree[1]->State == TreeEntry::NeedToGather)
4268     return false;
4269 
4270   return true;
4271 }
4272 
4273 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts,
4274                                        TargetTransformInfo *TTI,
4275                                        bool MustMatchOrInst) {
4276   // Look past the root to find a source value. Arbitrarily follow the
4277   // path through operand 0 of any 'or'. Also, peek through optional
4278   // shift-left-by-multiple-of-8-bits.
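       // For illustration, a hypothetical pattern this walk accepts:
       //   %ld = load i8, i8* %p
       //   %z  = zext i8 %ld to i32
       //   %sh = shl i32 %z, 8
       //   %or = or i32 %sh, %rest
       // Starting from %or, we step through the 'or' and the byte-aligned 'shl'
       // down to %z, which is then matched as a zext-of-load below.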
4279   Value *ZextLoad = Root;
4280   const APInt *ShAmtC;
4281   bool FoundOr = false;
4282   while (!isa<ConstantExpr>(ZextLoad) &&
4283          (match(ZextLoad, m_Or(m_Value(), m_Value())) ||
4284           (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) &&
4285            ShAmtC->urem(8) == 0))) {
4286     auto *BinOp = cast<BinaryOperator>(ZextLoad);
4287     ZextLoad = BinOp->getOperand(0);
4288     if (BinOp->getOpcode() == Instruction::Or)
4289       FoundOr = true;
4290   }
4291   // Check if the input is an extended load of the required or/shift expression.
4292   Value *LoadPtr;
4293   if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root ||
4294       !match(ZextLoad, m_ZExt(m_Load(m_Value(LoadPtr)))))
4295     return false;
4296 
4297   // Require that the total load bit width is a legal integer type.
4298   // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target.
4299   // But <16 x i8> --> i128 is not, so the backend probably can't reduce it.
4300   Type *SrcTy = LoadPtr->getType()->getPointerElementType();
4301   unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts;
4302   if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth)))
4303     return false;
4304 
4305   // Everything matched - assume that we can fold the whole sequence using
4306   // load combining.
4307   LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at "
4308              << *(cast<Instruction>(Root)) << "\n");
4309 
4310   return true;
4311 }
4312 
4313 bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const {
4314   if (RdxKind != RecurKind::Or)
4315     return false;
4316 
4317   unsigned NumElts = VectorizableTree[0]->Scalars.size();
4318   Value *FirstReduced = VectorizableTree[0]->Scalars[0];
4319   return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI,
4320                                     /*MustMatchOrInst=*/false);
4321 }
4322 
4323 bool BoUpSLP::isLoadCombineCandidate() const {
4324   // Peek through a final sequence of stores and check if all operations are
4325   // likely to be load-combined.
4326   unsigned NumElts = VectorizableTree[0]->Scalars.size();
4327   for (Value *Scalar : VectorizableTree[0]->Scalars) {
4328     Value *X;
4329     if (!match(Scalar, m_Store(m_Value(X), m_Value())) ||
4330         !isLoadCombineCandidateImpl(X, NumElts, TTI, /*MustMatchOrInst=*/true))
4331       return false;
4332   }
4333   return true;
4334 }
4335 
4336 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() const {
4337   // No need to vectorize inserts of gathered values.
4338   if (VectorizableTree.size() == 2 &&
4339       isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) &&
4340       VectorizableTree[1]->State == TreeEntry::NeedToGather)
4341     return true;
4342 
4343   // We can vectorize the tree if its size is greater than or equal to the
4344   // minimum size specified by the MinTreeSize command line option.
4345   if (VectorizableTree.size() >= MinTreeSize)
4346     return false;
4347 
4348   // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
4349   // can vectorize it if we can prove it fully vectorizable.
4350   if (isFullyVectorizableTinyTree())
4351     return false;
4352 
4353   assert((!VectorizableTree.empty() || ExternalUses.empty()) &&
4354          "We shouldn't have any external users");
4356 
4357   // Otherwise, we can't vectorize the tree. It is both tiny and not fully
4358   // vectorizable.
4359   return true;
4360 }
4361 
4362 InstructionCost BoUpSLP::getSpillCost() const {
4363   // Walk from the bottom of the tree to the top, tracking which values are
4364   // live. When we see a call instruction that is not part of our tree,
4365   // query TTI to see if there is a cost to keeping values live over it
4366   // (for example, if spills and fills are required).
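       // E.g., if a non-intrinsic call sits between two tree instructions, every
       // tree value still live across it is assumed to be spilled and reloaded,
       // priced per vector type via getCostOfKeepingLiveOverCall below.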
4367   unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
4368   InstructionCost Cost = 0;
4369 
4370   SmallPtrSet<Instruction*, 4> LiveValues;
4371   Instruction *PrevInst = nullptr;
4372 
4373   // The entries in VectorizableTree are not necessarily ordered by their
4374   // position in basic blocks. Collect them and order them by dominance so later
4375   // instructions are guaranteed to be visited first. For instructions in
4376   // different basic blocks, we only scan to the beginning of the block, so
4377   // their order does not matter, as long as all instructions in a basic block
4378   // are grouped together. Using dominance ensures a deterministic order.
4379   SmallVector<Instruction *, 16> OrderedScalars;
4380   for (const auto &TEPtr : VectorizableTree) {
4381     Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]);
4382     if (!Inst)
4383       continue;
4384     OrderedScalars.push_back(Inst);
4385   }
4386   llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) {
4387     auto *NodeA = DT->getNode(A->getParent());
4388     auto *NodeB = DT->getNode(B->getParent());
4389     assert(NodeA && "Should only process reachable instructions");
4390     assert(NodeB && "Should only process reachable instructions");
4391     assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
4392            "Different nodes should have different DFS numbers");
4393     if (NodeA != NodeB)
4394       return NodeA->getDFSNumIn() < NodeB->getDFSNumIn();
4395     return B->comesBefore(A);
4396   });
4397 
4398   for (Instruction *Inst : OrderedScalars) {
4399     if (!PrevInst) {
4400       PrevInst = Inst;
4401       continue;
4402     }
4403 
4404     // Update LiveValues.
4405     LiveValues.erase(PrevInst);
4406     for (auto &J : PrevInst->operands()) {
4407       if (isa<Instruction>(&*J) && getTreeEntry(&*J))
4408         LiveValues.insert(cast<Instruction>(&*J));
4409     }
4410 
4411     LLVM_DEBUG({
4412       dbgs() << "SLP: #LV: " << LiveValues.size();
4413       for (auto *X : LiveValues)
4414         dbgs() << " " << X->getName();
4415       dbgs() << ", Looking at ";
4416       Inst->dump();
4417     });
4418 
4419     // Now find the sequence of instructions between PrevInst and Inst.
4420     unsigned NumCalls = 0;
4421     BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
4422                                  PrevInstIt =
4423                                      PrevInst->getIterator().getReverse();
4424     while (InstIt != PrevInstIt) {
4425       if (PrevInstIt == PrevInst->getParent()->rend()) {
4426         PrevInstIt = Inst->getParent()->rbegin();
4427         continue;
4428       }
4429 
4430       // Debug information does not impact spill cost.
4431       if ((isa<CallInst>(&*PrevInstIt) &&
4432            !isa<DbgInfoIntrinsic>(&*PrevInstIt)) &&
4433           &*PrevInstIt != PrevInst)
4434         NumCalls++;
4435 
4436       ++PrevInstIt;
4437     }
4438 
4439     if (NumCalls) {
4440       SmallVector<Type*, 4> V;
4441       for (auto *II : LiveValues) {
4442         auto *ScalarTy = II->getType();
4443         if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy))
4444           ScalarTy = VectorTy->getElementType();
4445         V.push_back(FixedVectorType::get(ScalarTy, BundleWidth));
4446       }
4447       Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
4448     }
4449 
4450     PrevInst = Inst;
4451   }
4452 
4453   return Cost;
4454 }
4455 
4456 InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
4457   InstructionCost Cost = 0;
4458   LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
4459                     << VectorizableTree.size() << ".\n");
4460 
4461   unsigned BundleWidth = VectorizableTree[0]->Scalars.size();
4462 
4463   for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
4464     TreeEntry &TE = *VectorizableTree[I].get();
4465 
4466     InstructionCost C = getEntryCost(&TE, VectorizedVals);
4467     Cost += C;
4468     LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
4469                       << " for bundle that starts with " << *TE.Scalars[0]
4470                       << ".\n"
4471                       << "SLP: Current total cost = " << Cost << "\n");
4472   }
4473 
4474   SmallPtrSet<Value *, 16> ExtractCostCalculated;
4475   InstructionCost ExtractCost = 0;
4476   SmallVector<unsigned> VF;
4477   SmallVector<SmallVector<int>> ShuffleMask;
4478   SmallVector<Value *> FirstUsers;
4479   SmallVector<APInt> DemandedElts;
4480   for (ExternalUser &EU : ExternalUses) {
4481     // We only add extract cost once for the same scalar.
4482     if (!ExtractCostCalculated.insert(EU.Scalar).second)
4483       continue;
4484 
4485     // Uses by ephemeral values are free (because the ephemeral value will be
4486     // removed prior to code generation, and so the extraction will be
4487     // removed as well).
4488     if (EphValues.count(EU.User))
4489       continue;
4490 
4491     // No extract cost for a vector "scalar".
4492     if (isa<FixedVectorType>(EU.Scalar->getType()))
4493       continue;
4494 
4495     // Already counted the cost for external uses when we tried to adjust the
4496     // cost for extractelements, no need to add it again.
4497     if (isa<ExtractElementInst>(EU.Scalar))
4498       continue;
4499 
4500     // If the found user is an insertelement, do not calculate the extract cost
4501     // but try to detect it as a final shuffled/identity match.
4502     if (EU.User && isa<InsertElementInst>(EU.User)) {
4503       if (auto *FTy = dyn_cast<FixedVectorType>(EU.User->getType())) {
4504         Optional<int> InsertIdx = getInsertIndex(EU.User, 0);
4505         if (!InsertIdx || *InsertIdx == UndefMaskElem)
4506           continue;
4507         Value *VU = EU.User;
4508         auto *It = find_if(FirstUsers, [VU](Value *V) {
4509           // Checks if 2 insertelements are from the same buildvector.
4510           if (VU->getType() != V->getType())
4511             return false;
4512           auto *IE1 = cast<InsertElementInst>(VU);
4513           auto *IE2 = cast<InsertElementInst>(V);
4514           // Go through the insertelement instructions trying to find either
4515           // VU as the original vector for IE2 or V as the original vector for IE1.
4516           do {
4517             if (IE1 == VU || IE2 == V)
4518               return true;
4519             if (IE1)
4520               IE1 = dyn_cast<InsertElementInst>(IE1->getOperand(0));
4521             if (IE2)
4522               IE2 = dyn_cast<InsertElementInst>(IE2->getOperand(0));
4523           } while (IE1 || IE2);
4524           return false;
4525         });
4526         int VecId = -1;
4527         if (It == FirstUsers.end()) {
4528           VF.push_back(FTy->getNumElements());
4529           ShuffleMask.emplace_back(VF.back(), UndefMaskElem);
4530           FirstUsers.push_back(EU.User);
4531           DemandedElts.push_back(APInt::getNullValue(VF.back()));
4532           VecId = FirstUsers.size() - 1;
4533         } else {
4534           VecId = std::distance(FirstUsers.begin(), It);
4535         }
4536         int Idx = *InsertIdx;
4537         ShuffleMask[VecId][Idx] = EU.Lane;
4538         DemandedElts[VecId].setBit(Idx);
4539       }
4540     }
4541 
4542     // If we plan to rewrite the tree in a smaller type, we will need to sign
4543     // extend the extracted value back to the original type. Here, we account
4544     // for the extract and the added cost of the sign extend if needed.
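         // E.g., if the tree was demoted to i8 but the external user consumes the
         // original i32 scalar, we pay for extracting the i8 lane plus the
         // sext/zext back to i32, modeled by getExtractWithExtendCost below.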
4545     auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth);
4546     auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
4547     if (MinBWs.count(ScalarRoot)) {
4548       auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
4549       auto Extend =
4550           MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
4551       VecTy = FixedVectorType::get(MinTy, BundleWidth);
4552       ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
4553                                                    VecTy, EU.Lane);
4554     } else {
4555       ExtractCost +=
4556           TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
4557     }
4558   }
4559 
4560   InstructionCost SpillCost = getSpillCost();
4561   Cost += SpillCost + ExtractCost;
4562   for (int I = 0, E = FirstUsers.size(); I < E; ++I) {
4563     // For the very first element - simple shuffle of the source vector.
4564     int Limit = ShuffleMask[I].size() * 2;
4565     if (I == 0 &&
4566         all_of(ShuffleMask[I], [Limit](int Idx) { return Idx < Limit; }) &&
4567         !ShuffleVectorInst::isIdentityMask(ShuffleMask[I])) {
4568       InstructionCost C = TTI->getShuffleCost(
4569           TTI::SK_PermuteSingleSrc,
4570           cast<FixedVectorType>(FirstUsers[I]->getType()), ShuffleMask[I]);
4571       LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
4572                         << " for final shuffle of insertelement external users "
4573                         << *VectorizableTree.front()->Scalars.front() << ".\n"
4574                         << "SLP: Current total cost = " << Cost << "\n");
4575       Cost += C;
4576       continue;
4577     }
4578     // Other elements - permutation of 2 vectors (the initial one and the next
4579     // Ith incoming vector).
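         // E.g., with VF == 4 the mask {-1, 0, -1, 1} (UndefMaskElem == -1)
         // becomes {0, 4, 2, 5}: undefined positions keep the elements of the
         // initial vector, defined positions select from the incoming one.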
4580     unsigned VF = ShuffleMask[I].size();
4581     for (unsigned Idx = 0; Idx < VF; ++Idx) {
4582       int &Mask = ShuffleMask[I][Idx];
4583       Mask = Mask == UndefMaskElem ? Idx : VF + Mask;
4584     }
4585     InstructionCost C = TTI->getShuffleCost(
4586         TTI::SK_PermuteTwoSrc, cast<FixedVectorType>(FirstUsers[I]->getType()),
4587         ShuffleMask[I]);
4588     LLVM_DEBUG(
4589         dbgs()
4590         << "SLP: Adding cost " << C
4591         << " for final shuffle of vector node and external insertelement users "
4592         << *VectorizableTree.front()->Scalars.front() << ".\n"
4593         << "SLP: Current total cost = " << Cost << "\n");
4594     Cost += C;
4595     InstructionCost InsertCost = TTI->getScalarizationOverhead(
4596         cast<FixedVectorType>(FirstUsers[I]->getType()), DemandedElts[I],
4597         /*Insert*/ true,
4598         /*Extract*/ false);
4599     Cost -= InsertCost;
4600     LLVM_DEBUG(dbgs() << "SLP: subtracting the cost " << InsertCost
4601                       << " for insertelements gather.\n"
4602                       << "SLP: Current total cost = " << Cost << "\n");
4603   }
4604 
4605 #ifndef NDEBUG
4606   SmallString<256> Str;
4607   {
4608     raw_svector_ostream OS(Str);
4609     OS << "SLP: Spill Cost = " << SpillCost << ".\n"
4610        << "SLP: Extract Cost = " << ExtractCost << ".\n"
4611        << "SLP: Total Cost = " << Cost << ".\n";
4612   }
4613   LLVM_DEBUG(dbgs() << Str);
4614   if (ViewSLPTree)
4615     ViewGraph(this, "SLP" + F->getName(), false, Str);
4616 #endif
4617 
4618   return Cost;
4619 }
4620 
4621 Optional<TargetTransformInfo::ShuffleKind>
4622 BoUpSLP::isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
4623                                SmallVectorImpl<const TreeEntry *> &Entries) {
4624   // TODO: currently checking only for Scalars in the tree entry, need to count
4625   // reused elements too for better cost estimation.
4626   Mask.assign(TE->Scalars.size(), UndefMaskElem);
4627   Entries.clear();
4628   // Build a map from values to the gather tree entries containing them.
4629   DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>> ValueToTEs;
4630   for (const std::unique_ptr<TreeEntry> &EntryPtr : VectorizableTree) {
4631     if (EntryPtr.get() == TE)
4632       break;
4633     if (EntryPtr->State != TreeEntry::NeedToGather)
4634       continue;
4635     for (Value *V : EntryPtr->Scalars)
4636       ValueToTEs.try_emplace(V).first->getSecond().insert(EntryPtr.get());
4637   }
4638   // Find all tree entries used by the gathered values. If no common entries
4639   // are found - not a shuffle.
4640   // Here we build a set of tree nodes for each gathered value and try to find
4641   // the intersection between these sets. If we have at least one common tree
4642   // node for each gathered value - we have just a permutation of the single
4643   // vector. If we have 2 different sets, we're in a situation where we have a
4644   // permutation of 2 input vectors.
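       // For illustration (hypothetical entries): if TE->Scalars is {a, b, c, d},
       // where {a, b} are vectorized in entry E1 and {c, d} in entry E2, UsedTEs
       // becomes {{E1}, {E2}} and the gather is a permutation of 2 input vectors;
       // if all four values live in E1, it is a single-source permutation.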
4645   SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs;
4646   DenseMap<Value *, int> UsedValuesEntry;
4647   for (Value *V : TE->Scalars) {
4648     if (isa<UndefValue>(V))
4649       continue;
4650     // Build a list of tree entries where V is used.
4651     SmallPtrSet<const TreeEntry *, 4> VToTEs;
4652     auto It = ValueToTEs.find(V);
4653     if (It != ValueToTEs.end())
4654       VToTEs = It->second;
4655     if (const TreeEntry *VTE = getTreeEntry(V))
4656       VToTEs.insert(VTE);
4657     if (VToTEs.empty())
4658       return None;
4659     if (UsedTEs.empty()) {
4660       // On the first iteration, just insert the list of nodes into the vector.
4661       UsedTEs.push_back(VToTEs);
4662     } else {
4663       // Need to check if there are any previously used tree nodes which use V.
4664       // If there are no such nodes, consider that we have one more input
4665       // vector.
4666       SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs);
4667       unsigned Idx = 0;
4668       for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) {
4669         // Do we have a non-empty intersection of previously listed tree entries
4670         // and tree entries using current V?
4671         set_intersect(VToTEs, Set);
4672         if (!VToTEs.empty()) {
4673           // Yes, write the new subset and continue analysis for the next
4674           // scalar.
4675           Set.swap(VToTEs);
4676           break;
4677         }
4678         VToTEs = SavedVToTEs;
4679         ++Idx;
4680       }
4681       // No non-empty intersection found - need to add a second set of possible
4682       // source vectors.
4683       if (Idx == UsedTEs.size()) {
4684         // If we would need more than 2 input vectors - not a permutation;
4685         // fallback to the regular gather.
4686         if (UsedTEs.size() == 2)
4687           return None;
4688         UsedTEs.push_back(SavedVToTEs);
4689         Idx = UsedTEs.size() - 1;
4690       }
4691       UsedValuesEntry.try_emplace(V, Idx);
4692     }
4693   }
4694 
4695   unsigned VF = 0;
4696   if (UsedTEs.size() == 1) {
4697     // First, try to find a perfect match in another gather node.
4698     auto It = find_if(UsedTEs.front(), [TE](const TreeEntry *EntryPtr) {
4699       return EntryPtr->isSame(TE->Scalars);
4700     });
4701     if (It != UsedTEs.front().end()) {
4702       Entries.push_back(*It);
4703       std::iota(Mask.begin(), Mask.end(), 0);
4704       return TargetTransformInfo::SK_PermuteSingleSrc;
4705     }
4706     // No perfect match, just shuffle, so choose the first tree node.
4707     Entries.push_back(*UsedTEs.front().begin());
4708   } else {
4709     // Try to find nodes with the same vector factor.
4710     assert(UsedTEs.size() == 2 && "Expected at max 2 permuted entries.");
4711     // FIXME: Shall be replaced by GetVF function once non-power-2 patch is
4712     // landed.
4713     auto &&GetVF = [](const TreeEntry *TE) {
4714       if (!TE->ReuseShuffleIndices.empty())
4715         return TE->ReuseShuffleIndices.size();
4716       return TE->Scalars.size();
4717     };
4718     DenseMap<int, const TreeEntry *> VFToTE;
4719     for (const TreeEntry *TE : UsedTEs.front())
4720       VFToTE.try_emplace(GetVF(TE), TE);
4721     for (const TreeEntry *TE : UsedTEs.back()) {
4722       auto It = VFToTE.find(GetVF(TE));
4723       if (It != VFToTE.end()) {
4724         VF = It->first;
4725         Entries.push_back(It->second);
4726         Entries.push_back(TE);
4727         break;
4728       }
4729     }
4730     // No 2 source vectors with the same vector factor were found - give up
4731     // and do a regular gather.
4732     if (Entries.empty())
4733       return None;
4734   }
4735 
4736   // Build a shuffle mask for better cost estimation and vector emission.
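       // E.g., with 2 entries of vector factor 4, a value found at lane 1 of the
       // second entry (Idx == 1) is encoded as mask element 1 * 4 + 1 == 5.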
4737   for (int I = 0, E = TE->Scalars.size(); I < E; ++I) {
4738     Value *V = TE->Scalars[I];
4739     if (isa<UndefValue>(V))
4740       continue;
4741     unsigned Idx = UsedValuesEntry.lookup(V);
4742     const TreeEntry *VTE = Entries[Idx];
4743     int FoundLane = VTE->findLaneForValue(V);
4744     Mask[I] = Idx * VF + FoundLane;
4745     // Extra check required by isSingleSourceMaskImpl function (called by
4746     // ShuffleVectorInst::isSingleSourceMask).
4747     if (Mask[I] >= 2 * E)
4748       return None;
4749   }
4750   switch (Entries.size()) {
4751   case 1:
4752     return TargetTransformInfo::SK_PermuteSingleSrc;
4753   case 2:
4754     return TargetTransformInfo::SK_PermuteTwoSrc;
4755   default:
4756     break;
4757   }
4758   return None;
4759 }
4760 
4761 InstructionCost
4762 BoUpSLP::getGatherCost(FixedVectorType *Ty,
4763                        const DenseSet<unsigned> &ShuffledIndices) const {
4764   unsigned NumElts = Ty->getNumElements();
4765   APInt DemandedElts = APInt::getNullValue(NumElts);
4766   for (unsigned I = 0; I < NumElts; ++I)
4767     if (!ShuffledIndices.count(I))
4768       DemandedElts.setBit(I);
4769   InstructionCost Cost =
4770       TTI->getScalarizationOverhead(Ty, DemandedElts, /*Insert*/ true,
4771                                     /*Extract*/ false);
4772   if (!ShuffledIndices.empty())
4773     Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty);
4774   return Cost;
4775 }
4776 
4777 InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const {
4778   // Find the type of the operands in VL.
4779   Type *ScalarTy = VL[0]->getType();
4780   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
4781     ScalarTy = SI->getValueOperand()->getType();
4782   auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
4783   // Find the cost of inserting/extracting values from the vector.
4784   // Check if the same elements are inserted several times and count them as
4785   // shuffle candidates.
4786   DenseSet<unsigned> ShuffledElements;
4787   DenseSet<Value *> UniqueElements;
4788   // Iterate in reverse order to consider the higher-cost insert elements first.
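       // E.g., for VL = {a, b, a, c} the reverse walk sees 'a' first at index 2,
       // so index 0 is added to ShuffledElements and is assumed to be produced by
       // the final single-source permute rather than by its own insertelement.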
4789   for (unsigned I = VL.size(); I > 0; --I) {
4790     unsigned Idx = I - 1;
4791     if (isConstant(VL[Idx]))
4792       continue;
4793     if (!UniqueElements.insert(VL[Idx]).second)
4794       ShuffledElements.insert(Idx);
4795   }
4796   return getGatherCost(VecTy, ShuffledElements);
4797 }
4798 
4799 // Perform operand reordering on the instructions in VL and return the reordered
4800 // operands in Left and Right.
4801 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
4802                                              SmallVectorImpl<Value *> &Left,
4803                                              SmallVectorImpl<Value *> &Right,
4804                                              const DataLayout &DL,
4805                                              ScalarEvolution &SE,
4806                                              const BoUpSLP &R) {
4807   if (VL.empty())
4808     return;
4809   VLOperands Ops(VL, DL, SE, R);
4810   // Reorder the operands in place.
4811   Ops.reorder();
4812   Left = Ops.getVL(0);
4813   Right = Ops.getVL(1);
4814 }
4815 
4816 void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) {
4817   // Get the basic block this bundle is in. All instructions in the bundle
4818   // should be in this block.
4819   auto *Front = E->getMainOp();
4820   auto *BB = Front->getParent();
4821   assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool {
4822     auto *I = cast<Instruction>(V);
4823     return !E->isOpcodeOrAlt(I) || I->getParent() == BB;
4824   }));
4825 
4826   // The last instruction in the bundle in program order.
4827   Instruction *LastInst = nullptr;
4828 
4829   // Find the last instruction. The common case should be that BB has been
4830   // scheduled, and the last instruction is VL.back(). So we start with
4831   // VL.back() and iterate over schedule data until we reach the end of the
4832   // bundle. The end of the bundle is marked by null ScheduleData.
4833   if (BlocksSchedules.count(BB)) {
4834     auto *Bundle =
4835         BlocksSchedules[BB]->getScheduleData(E->isOneOf(E->Scalars.back()));
4836     if (Bundle && Bundle->isPartOfBundle())
4837       for (; Bundle; Bundle = Bundle->NextInBundle)
4838         if (Bundle->OpValue == Bundle->Inst)
4839           LastInst = Bundle->Inst;
4840   }
4841 
4842   // LastInst can still be null at this point if there's either not an entry
4843   // for BB in BlocksSchedules or there's no ScheduleData available for
4844   // VL.back(). This can be the case if buildTree_rec aborts for various
4845   // reasons (e.g., the maximum recursion depth is reached, the maximum region
4846   // size is reached, etc.). ScheduleData is initialized in the scheduling
4847   // "dry-run".
4848   //
4849   // If this happens, we can still find the last instruction by brute force. We
4850   // iterate forwards from Front (inclusive) until we either see all
4851   // instructions in the bundle or reach the end of the block. If Front is the
4852   // last instruction in program order, LastInst will be set to Front, and we
4853   // will visit all the remaining instructions in the block.
4854   //
4855   // One of the reasons we exit early from buildTree_rec is to place an upper
4856   // bound on compile-time. Thus, taking an additional compile-time hit here is
4857   // not ideal. However, this should be exceedingly rare since it requires that
4858   // we both exit early from buildTree_rec and that the bundle be out-of-order
4859   // (causing us to iterate all the way to the end of the block).
4860   if (!LastInst) {
4861     SmallPtrSet<Value *, 16> Bundle(E->Scalars.begin(), E->Scalars.end());
4862     for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
4863       if (Bundle.erase(&I) && E->isOpcodeOrAlt(&I))
4864         LastInst = &I;
4865       if (Bundle.empty())
4866         break;
4867     }
4868   }
4869   assert(LastInst && "Failed to find last instruction in bundle");
4870 
4871   // Set the insertion point after the last instruction in the bundle. Set the
4872   // debug location to Front.
4873   Builder.SetInsertPoint(BB, ++LastInst->getIterator());
4874   Builder.SetCurrentDebugLocation(Front->getDebugLoc());
4875 }
4876 
4877 Value *BoUpSLP::gather(ArrayRef<Value *> VL) {
4878   // List of instructions/lanes from the current block and/or the blocks which
4879   // are part of the current loop. These instructions will be inserted at the
4880   // end to make it possible to optimize loops and hoist invariant instructions
4881   // out of the loop body with better chances for success.
4882   SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts;
4883   SmallSet<int, 4> PostponedIndices;
4884   Loop *L = LI->getLoopFor(Builder.GetInsertBlock());
4885   auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) {
4886     SmallPtrSet<BasicBlock *, 4> Visited;
4887     while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second)
4888       InsertBB = InsertBB->getSinglePredecessor();
4889     return InsertBB && InsertBB == InstBB;
4890   };
4891   for (int I = 0, E = VL.size(); I < E; ++I) {
4892     if (auto *Inst = dyn_cast<Instruction>(VL[I]))
4893       if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) ||
4894            getTreeEntry(Inst) || (L && (L->contains(Inst)))) &&
4895           PostponedIndices.insert(I).second)
4896         PostponedInsts.emplace_back(Inst, I);
4897   }
4898 
4899   auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) {
4900     Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos));
4901     auto *InsElt = dyn_cast<InsertElementInst>(Vec);
4902     if (!InsElt)
4903       return Vec;
4904     GatherSeq.insert(InsElt);
4905     CSEBlocks.insert(InsElt->getParent());
4906     // Add to our 'need-to-extract' list.
4907     if (TreeEntry *Entry = getTreeEntry(V)) {
4908       // Find which lane we need to extract.
4909       unsigned FoundLane = Entry->findLaneForValue(V);
4910       ExternalUses.emplace_back(V, InsElt, FoundLane);
4911     }
4912     return Vec;
4913   };
4914   Value *Val0 =
4915       isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0];
4916   FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size());
4917   Value *Vec = PoisonValue::get(VecTy);
4918   SmallVector<int> NonConsts;
4919   // Insert constant values first.
4920   for (int I = 0, E = VL.size(); I < E; ++I) {
4921     if (PostponedIndices.contains(I))
4922       continue;
4923     if (!isConstant(VL[I])) {
4924       NonConsts.push_back(I);
4925       continue;
4926     }
4927     Vec = CreateInsertElement(Vec, VL[I], I);
4928   }
4929   // Insert non-constant values.
4930   for (int I : NonConsts)
4931     Vec = CreateInsertElement(Vec, VL[I], I);
4932   // Append the instructions which are/may be part of the loop at the end, to
4933   // make it possible to hoist non-loop-based instructions.
4934   for (const std::pair<Value *, unsigned> &Pair : PostponedInsts)
4935     Vec = CreateInsertElement(Vec, Pair.first, Pair.second);
4936 
4937   return Vec;
4938 }
4939 
4940 namespace {
4941 /// Merges shuffle masks and emits final shuffle instruction, if required.
4942 class ShuffleInstructionBuilder {
4943   IRBuilderBase &Builder;
4944   const unsigned VF = 0;
4945   bool IsFinalized = false;
4946   SmallVector<int, 4> Mask;
4947 
4948 public:
4949   ShuffleInstructionBuilder(IRBuilderBase &Builder, unsigned VF)
4950       : Builder(Builder), VF(VF) {}
4951 
4952   /// Adds a mask, inverting it before applying.
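       /// E.g., the inverse of the order {1, 2, 0} is the mask {2, 0, 1}, i.e.
       /// the result satisfies NewMask[SubMask[I]] == I for all lanes I.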
4953   void addInversedMask(ArrayRef<unsigned> SubMask) {
4954     if (SubMask.empty())
4955       return;
4956     SmallVector<int, 4> NewMask;
4957     inversePermutation(SubMask, NewMask);
4958     addMask(NewMask);
4959   }
4960 
4961   /// Adds a mask, merging it into the single accumulated mask.
4962   void addMask(ArrayRef<unsigned> SubMask) {
4963     SmallVector<int, 4> NewMask(SubMask.begin(), SubMask.end());
4964     addMask(NewMask);
4965   }
4966 
4967   void addMask(ArrayRef<int> SubMask) { ::addMask(Mask, SubMask); }
4968 
4969   Value *finalize(Value *V) {
4970     IsFinalized = true;
4971     unsigned ValueVF = cast<FixedVectorType>(V->getType())->getNumElements();
4972     if (VF == ValueVF && Mask.empty())
4973       return V;
4974     SmallVector<int, 4> NormalizedMask(VF, UndefMaskElem);
4975     std::iota(NormalizedMask.begin(), NormalizedMask.end(), 0);
4976     addMask(NormalizedMask);
4977 
4978     if (VF == ValueVF && ShuffleVectorInst::isIdentityMask(Mask))
4979       return V;
4980     return Builder.CreateShuffleVector(V, Mask, "shuffle");
4981   }
4982 
4983   ~ShuffleInstructionBuilder() {
4984     assert((IsFinalized || Mask.empty()) &&
4985            "Shuffle construction must be finalized.");
4986   }
4987 };
4988 } // namespace
4989 
4990 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
4991   unsigned VF = VL.size();
4992   InstructionsState S = getSameOpcode(VL);
4993   if (S.getOpcode()) {
4994     if (TreeEntry *E = getTreeEntry(S.OpValue))
4995       if (E->isSame(VL)) {
4996         Value *V = vectorizeTree(E);
4997         if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) {
4998           if (!E->ReuseShuffleIndices.empty()) {
4999             // Reshuffle to get only unique values.
5000             // If some of the scalars are duplicated in the vectorization tree
5001             // entry, we do not vectorize them but instead generate a mask for
5002             // the reuses. But if there are several users of the same entry,
5003             // they may have different vectorization factors. This is especially
5004             // important for PHI nodes. In this case, we need to adapt the
5005             // resulting instruction for the user vectorization factor and have
5006             // to reshuffle it again to take only unique elements of the vector.
5007             // Without this code the function would incorrectly return a reduced
5008             // vector instruction with the same elements, not with the unique ones.
5009 
5010             // block:
5011             // %phi = phi <2 x > { .., %entry} {%shuffle, %block}
5012             // %2 = shuffle <2 x > %phi, %poison, <4 x > <0, 0, 1, 1>
5013             // ... (use %2)
5014             // %shuffle = shuffle <2 x> %2, poison, <2 x> {0, 2}
5015             // br %block
5016             SmallVector<int> UniqueIdxs;
5017             SmallSet<int, 4> UsedIdxs;
5018             int Pos = 0;
5019             int Sz = VL.size();
5020             for (int Idx : E->ReuseShuffleIndices) {
5021               if (Idx != Sz && UsedIdxs.insert(Idx).second)
5022                 UniqueIdxs.emplace_back(Pos);
5023               ++Pos;
5024             }
5025             assert(VF >= UsedIdxs.size() && "Expected vectorization factor "
5026                                             "of at least the number of used indices.");
5027             UniqueIdxs.append(VF - UsedIdxs.size(), UndefMaskElem);
5028             V = Builder.CreateShuffleVector(V, UniqueIdxs, "shrink.shuffle");
5029           } else {
5030             assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() &&
5031                    "Expected vectorization factor less "
5032                    "than original vector size.");
5033             SmallVector<int> UniformMask(VF, 0);
5034             std::iota(UniformMask.begin(), UniformMask.end(), 0);
5035             V = Builder.CreateShuffleVector(V, UniformMask, "shrink.shuffle");
5036           }
5037         }
5038         return V;
5039       }
5040   }
5041 
5042   // Check that every instruction appears once in this bundle.
5043   SmallVector<int> ReuseShuffleIndicies;
5044   SmallVector<Value *> UniqueValues;
5045   if (VL.size() > 2) {
5046     DenseMap<Value *, unsigned> UniquePositions;
5047     unsigned NumValues =
5048         std::distance(VL.begin(), find_if(reverse(VL), [](Value *V) {
5049                                     return !isa<UndefValue>(V);
5050                                   }).base());
5051     VF = std::max<unsigned>(VF, PowerOf2Ceil(NumValues));
5052     int UniqueVals = 0;
5053     for (Value *V : VL.drop_back(VL.size() - VF)) {
5054       if (isa<UndefValue>(V)) {
5055         ReuseShuffleIndicies.emplace_back(UndefMaskElem);
5056         continue;
5057       }
5058       if (isConstant(V)) {
5059         ReuseShuffleIndicies.emplace_back(UniqueValues.size());
5060         UniqueValues.emplace_back(V);
5061         continue;
5062       }
5063       auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
5064       ReuseShuffleIndicies.emplace_back(Res.first->second);
5065       if (Res.second) {
5066         UniqueValues.emplace_back(V);
5067         ++UniqueVals;
5068       }
5069     }
5070     if (UniqueVals == 1 && UniqueValues.size() == 1) {
5071       // Emit pure splat vector.
5072       ReuseShuffleIndicies.append(VF - ReuseShuffleIndicies.size(),
5073                                   UndefMaskElem);
5074     } else if (UniqueValues.size() >= VF - 1 || UniqueValues.size() <= 1) {
5075       ReuseShuffleIndicies.clear();
5076       UniqueValues.clear();
5077       UniqueValues.append(VL.begin(), std::next(VL.begin(), NumValues));
5078     }
5079     UniqueValues.append(VF - UniqueValues.size(),
5080                         PoisonValue::get(VL[0]->getType()));
5081     VL = UniqueValues;
5082   }
5083 
5084   ShuffleInstructionBuilder ShuffleBuilder(Builder, VF);
5085   Value *Vec = gather(VL);
5086   if (!ReuseShuffleIndicies.empty()) {
5087     ShuffleBuilder.addMask(ReuseShuffleIndicies);
5088     Vec = ShuffleBuilder.finalize(Vec);
5089     if (auto *I = dyn_cast<Instruction>(Vec)) {
5090       GatherSeq.insert(I);
5091       CSEBlocks.insert(I->getParent());
5092     }
5093   }
5094   return Vec;
5095 }
5096 
5097 Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
5098   IRBuilder<>::InsertPointGuard Guard(Builder);
5099 
5100   if (E->VectorizedValue) {
5101     LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
5102     return E->VectorizedValue;
5103   }
5104 
5105   bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
5106   unsigned VF = E->Scalars.size();
5107   if (NeedToShuffleReuses)
5108     VF = E->ReuseShuffleIndices.size();
5109   ShuffleInstructionBuilder ShuffleBuilder(Builder, VF);
5110   if (E->State == TreeEntry::NeedToGather) {
5111     setInsertPointAfterBundle(E);
5112     Value *Vec;
5113     SmallVector<int> Mask;
5114     SmallVector<const TreeEntry *> Entries;
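    // First try to express the gather as a shuffle of one or two already
    // vectorized tree entries; fall back to an insertelement sequence only if
    // no such shuffle exists.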
5115     Optional<TargetTransformInfo::ShuffleKind> Shuffle =
5116         isGatherShuffledEntry(E, Mask, Entries);
5117     if (Shuffle.hasValue()) {
5118       assert((Entries.size() == 1 || Entries.size() == 2) &&
5119              "Expected shuffle of 1 or 2 entries.");
5120       Vec = Builder.CreateShuffleVector(Entries.front()->VectorizedValue,
5121                                         Entries.back()->VectorizedValue, Mask);
5122     } else {
5123       Vec = gather(E->Scalars);
5124     }
5125     if (NeedToShuffleReuses) {
5126       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5127       Vec = ShuffleBuilder.finalize(Vec);
5128       if (auto *I = dyn_cast<Instruction>(Vec)) {
5129         GatherSeq.insert(I);
5130         CSEBlocks.insert(I->getParent());
5131       }
5132     }
5133     E->VectorizedValue = Vec;
5134     return Vec;
5135   }
5136 
5137   assert((E->State == TreeEntry::Vectorize ||
5138           E->State == TreeEntry::ScatterVectorize) &&
5139          "Unhandled state");
5140   unsigned ShuffleOrOp =
5141       E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
5142   Instruction *VL0 = E->getMainOp();
5143   Type *ScalarTy = VL0->getType();
5144   if (auto *Store = dyn_cast<StoreInst>(VL0))
5145     ScalarTy = Store->getValueOperand()->getType();
5146   else if (auto *IE = dyn_cast<InsertElementInst>(VL0))
5147     ScalarTy = IE->getOperand(1)->getType();
5148   auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size());
5149   switch (ShuffleOrOp) {
5150     case Instruction::PHI: {
5151       auto *PH = cast<PHINode>(VL0);
5152       Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
5153       Builder.SetCurrentDebugLocation(PH->getDebugLoc());
5154       PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
5155       Value *V = NewPhi;
5156       if (NeedToShuffleReuses)
5157         V = Builder.CreateShuffleVector(V, E->ReuseShuffleIndices, "shuffle");
5158 
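      // Record the vectorized value before visiting the incoming values: a
      // PHI may be part of a def-use cycle, and the recursive vectorizeTree
      // calls below can reach this entry again.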
5159       E->VectorizedValue = V;
5160 
5161       // PHINodes may have multiple entries from the same block. We want to
5162       // visit every block once.
      SmallPtrSet<BasicBlock *, 4> VisitedBBs;
5164 
5165       for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
5167         BasicBlock *IBB = PH->getIncomingBlock(i);
5168 
5169         if (!VisitedBBs.insert(IBB).second) {
5170           NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
5171           continue;
5172         }
5173 
5174         Builder.SetInsertPoint(IBB->getTerminator());
5175         Builder.SetCurrentDebugLocation(PH->getDebugLoc());
5176         Value *Vec = vectorizeTree(E->getOperand(i));
5177         NewPhi->addIncoming(Vec, IBB);
5178       }
5179 
5180       assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
5181              "Invalid number of incoming values");
5182       return V;
5183     }
5184 
5185     case Instruction::ExtractElement: {
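      // All scalars are lanes of a single source vector; reuse that vector
      // and just apply the reorder/reuse masks.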
5186       Value *V = E->getSingleOperand(0);
5187       Builder.SetInsertPoint(VL0);
5188       ShuffleBuilder.addInversedMask(E->ReorderIndices);
5189       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5190       V = ShuffleBuilder.finalize(V);
5191       E->VectorizedValue = V;
5192       return V;
5193     }
5194     case Instruction::ExtractValue: {
5195       auto *LI = cast<LoadInst>(E->getSingleOperand(0));
5196       Builder.SetInsertPoint(LI);
5197       auto *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
5198       Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
5199       LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign());
5200       Value *NewV = propagateMetadata(V, E->Scalars);
5201       ShuffleBuilder.addInversedMask(E->ReorderIndices);
5202       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5203       NewV = ShuffleBuilder.finalize(NewV);
5204       E->VectorizedValue = NewV;
5205       return NewV;
5206     }
5207     case Instruction::InsertElement: {
5208       Builder.SetInsertPoint(VL0);
5209       Value *V = vectorizeTree(E->getOperand(1));
5210 
5211       const unsigned NumElts =
5212           cast<FixedVectorType>(VL0->getType())->getNumElements();
5213       const unsigned NumScalars = E->Scalars.size();
5214 
      // Create an InsertVector shuffle if necessary.
5216       Instruction *FirstInsert = nullptr;
5217       bool IsIdentity = true;
5218       unsigned Offset = UINT_MAX;
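      // Find the first insertelement whose base vector is defined outside
      // this bundle, the minimal insert index (Offset), and whether the
      // inserted scalars already form an identity sequence.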
5219       for (unsigned I = 0; I < NumScalars; ++I) {
5220         Value *Scalar = E->Scalars[I];
5221         if (!FirstInsert &&
5222             !is_contained(E->Scalars, cast<Instruction>(Scalar)->getOperand(0)))
5223           FirstInsert = cast<Instruction>(Scalar);
5224         Optional<int> InsertIdx = getInsertIndex(Scalar, 0);
5225         if (!InsertIdx || *InsertIdx == UndefMaskElem)
5226           continue;
5227         unsigned Idx = *InsertIdx;
5228         if (Idx < Offset) {
5229           Offset = Idx;
5230           IsIdentity &= I == 0;
5231         } else {
5232           assert(Idx >= Offset && "Failed to find vector index offset");
5233           IsIdentity &= Idx - Offset == I;
5234         }
5235       }
5236       assert(Offset < NumElts && "Failed to find vector index offset");
5237 
5238       // Create shuffle to resize vector
5239       SmallVector<int> Mask(NumElts, UndefMaskElem);
5240       if (!IsIdentity) {
5241         for (unsigned I = 0; I < NumScalars; ++I) {
5242           Value *Scalar = E->Scalars[I];
5243           Optional<int> InsertIdx = getInsertIndex(Scalar, 0);
5244           if (!InsertIdx || *InsertIdx == UndefMaskElem)
5245             continue;
5246           Mask[*InsertIdx - Offset] = I;
5247         }
5248       } else {
5249         std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0);
5250       }
5251       if (!IsIdentity || NumElts != NumScalars)
5252         V = Builder.CreateShuffleVector(V, Mask);
5253 
5254       if ((!IsIdentity || Offset != 0 ||
5255            !isa<UndefValue>(FirstInsert->getOperand(0))) &&
5256           NumElts != NumScalars) {
5257         SmallVector<int> InsertMask(NumElts);
5258         std::iota(InsertMask.begin(), InsertMask.end(), 0);
5259         for (unsigned I = 0; I < NumElts; I++) {
5260           if (Mask[I] != UndefMaskElem)
5261             InsertMask[Offset + I] = NumElts + I;
5262         }
5263 
5264         V = Builder.CreateShuffleVector(
5265             FirstInsert->getOperand(0), V, InsertMask,
5266             cast<Instruction>(E->Scalars.back())->getName());
5267       }
5268 
5269       ++NumVectorInstructions;
5270       E->VectorizedValue = V;
5271       return V;
5272     }
5273     case Instruction::ZExt:
5274     case Instruction::SExt:
5275     case Instruction::FPToUI:
5276     case Instruction::FPToSI:
5277     case Instruction::FPExt:
5278     case Instruction::PtrToInt:
5279     case Instruction::IntToPtr:
5280     case Instruction::SIToFP:
5281     case Instruction::UIToFP:
5282     case Instruction::Trunc:
5283     case Instruction::FPTrunc:
5284     case Instruction::BitCast: {
5285       setInsertPointAfterBundle(E);
5286 
5287       Value *InVec = vectorizeTree(E->getOperand(0));
5288 
5289       if (E->VectorizedValue) {
5290         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
5291         return E->VectorizedValue;
5292       }
5293 
5294       auto *CI = cast<CastInst>(VL0);
5295       Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
5296       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5297       V = ShuffleBuilder.finalize(V);
5298 
5299       E->VectorizedValue = V;
5300       ++NumVectorInstructions;
5301       return V;
5302     }
5303     case Instruction::FCmp:
5304     case Instruction::ICmp: {
5305       setInsertPointAfterBundle(E);
5306 
5307       Value *L = vectorizeTree(E->getOperand(0));
5308       Value *R = vectorizeTree(E->getOperand(1));
5309 
5310       if (E->VectorizedValue) {
5311         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
5312         return E->VectorizedValue;
5313       }
5314 
5315       CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
5316       Value *V = Builder.CreateCmp(P0, L, R);
5317       propagateIRFlags(V, E->Scalars, VL0);
5318       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5319       V = ShuffleBuilder.finalize(V);
5320 
5321       E->VectorizedValue = V;
5322       ++NumVectorInstructions;
5323       return V;
5324     }
5325     case Instruction::Select: {
5326       setInsertPointAfterBundle(E);
5327 
5328       Value *Cond = vectorizeTree(E->getOperand(0));
5329       Value *True = vectorizeTree(E->getOperand(1));
5330       Value *False = vectorizeTree(E->getOperand(2));
5331 
5332       if (E->VectorizedValue) {
5333         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
5334         return E->VectorizedValue;
5335       }
5336 
5337       Value *V = Builder.CreateSelect(Cond, True, False);
5338       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5339       V = ShuffleBuilder.finalize(V);
5340 
5341       E->VectorizedValue = V;
5342       ++NumVectorInstructions;
5343       return V;
5344     }
5345     case Instruction::FNeg: {
5346       setInsertPointAfterBundle(E);
5347 
5348       Value *Op = vectorizeTree(E->getOperand(0));
5349 
5350       if (E->VectorizedValue) {
5351         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
5352         return E->VectorizedValue;
5353       }
5354 
5355       Value *V = Builder.CreateUnOp(
5356           static_cast<Instruction::UnaryOps>(E->getOpcode()), Op);
5357       propagateIRFlags(V, E->Scalars, VL0);
5358       if (auto *I = dyn_cast<Instruction>(V))
5359         V = propagateMetadata(I, E->Scalars);
5360 
5361       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5362       V = ShuffleBuilder.finalize(V);
5363 
5364       E->VectorizedValue = V;
5365       ++NumVectorInstructions;
5366 
5367       return V;
5368     }
5369     case Instruction::Add:
5370     case Instruction::FAdd:
5371     case Instruction::Sub:
5372     case Instruction::FSub:
5373     case Instruction::Mul:
5374     case Instruction::FMul:
5375     case Instruction::UDiv:
5376     case Instruction::SDiv:
5377     case Instruction::FDiv:
5378     case Instruction::URem:
5379     case Instruction::SRem:
5380     case Instruction::FRem:
5381     case Instruction::Shl:
5382     case Instruction::LShr:
5383     case Instruction::AShr:
5384     case Instruction::And:
5385     case Instruction::Or:
5386     case Instruction::Xor: {
5387       setInsertPointAfterBundle(E);
5388 
5389       Value *LHS = vectorizeTree(E->getOperand(0));
5390       Value *RHS = vectorizeTree(E->getOperand(1));
5391 
5392       if (E->VectorizedValue) {
5393         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
5394         return E->VectorizedValue;
5395       }
5396 
      Value *V = Builder.CreateBinOp(
          static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
5400       propagateIRFlags(V, E->Scalars, VL0);
5401       if (auto *I = dyn_cast<Instruction>(V))
5402         V = propagateMetadata(I, E->Scalars);
5403 
5404       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5405       V = ShuffleBuilder.finalize(V);
5406 
5407       E->VectorizedValue = V;
5408       ++NumVectorInstructions;
5409 
5410       return V;
5411     }
5412     case Instruction::Load: {
5413       // Loads are inserted at the head of the tree because we don't want to
5414       // sink them all the way down past store instructions.
5415       bool IsReorder = E->updateStateIfReorder();
5416       if (IsReorder)
5417         VL0 = E->getMainOp();
5418       setInsertPointAfterBundle(E);
5419 
5420       LoadInst *LI = cast<LoadInst>(VL0);
5421       Instruction *NewLI;
5422       unsigned AS = LI->getPointerAddressSpace();
5423       Value *PO = LI->getPointerOperand();
5424       if (E->State == TreeEntry::Vectorize) {
5425 
5426         Value *VecPtr = Builder.CreateBitCast(PO, VecTy->getPointerTo(AS));
5427 
        // The pointer operand uses an in-tree scalar, so add the new BitCast
        // to the ExternalUses list to make sure that an extract will be
        // generated in the future.
5431         if (getTreeEntry(PO))
5432           ExternalUses.emplace_back(PO, cast<User>(VecPtr), 0);
5433 
5434         NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign());
5435       } else {
5436         assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state");
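        // Vectorize the pointer operands and emit a masked gather instead of
        // a consecutive vector load.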
5437         Value *VecPtr = vectorizeTree(E->getOperand(0));
5438         // Use the minimum alignment of the gathered loads.
5439         Align CommonAlignment = LI->getAlign();
5440         for (Value *V : E->Scalars)
5441           CommonAlignment =
5442               commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign());
5443         NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment);
5444       }
5445       Value *V = propagateMetadata(NewLI, E->Scalars);
5446 
5447       ShuffleBuilder.addInversedMask(E->ReorderIndices);
5448       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5449       V = ShuffleBuilder.finalize(V);
5450       E->VectorizedValue = V;
5451       ++NumVectorInstructions;
5452       return V;
5453     }
5454     case Instruction::Store: {
5455       bool IsReorder = !E->ReorderIndices.empty();
5456       auto *SI = cast<StoreInst>(
5457           IsReorder ? E->Scalars[E->ReorderIndices.front()] : VL0);
5458       unsigned AS = SI->getPointerAddressSpace();
5459 
5460       setInsertPointAfterBundle(E);
5461 
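      // Vectorize the stored values and, if the stores were reordered, apply
      // the reorder mask so the lanes match the consecutive pointer order.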
5462       Value *VecValue = vectorizeTree(E->getOperand(0));
5463       ShuffleBuilder.addMask(E->ReorderIndices);
5464       VecValue = ShuffleBuilder.finalize(VecValue);
5465 
5466       Value *ScalarPtr = SI->getPointerOperand();
5467       Value *VecPtr = Builder.CreateBitCast(
5468           ScalarPtr, VecValue->getType()->getPointerTo(AS));
5469       StoreInst *ST = Builder.CreateAlignedStore(VecValue, VecPtr,
5470                                                  SI->getAlign());
5471 
5472       // The pointer operand uses an in-tree scalar, so add the new BitCast to
5473       // ExternalUses to make sure that an extract will be generated in the
5474       // future.
5475       if (getTreeEntry(ScalarPtr))
5476         ExternalUses.push_back(ExternalUser(ScalarPtr, cast<User>(VecPtr), 0));
5477 
5478       Value *V = propagateMetadata(ST, E->Scalars);
5479 
5480       E->VectorizedValue = V;
5481       ++NumVectorInstructions;
5482       return V;
5483     }
5484     case Instruction::GetElementPtr: {
5485       setInsertPointAfterBundle(E);
5486 
5487       Value *Op0 = vectorizeTree(E->getOperand(0));
5488 
5489       std::vector<Value *> OpVecs;
5490       for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
5491            ++j) {
5492         ValueList &VL = E->getOperand(j);
        // Cast all elements to the same type before vectorization to avoid a
        // crash.
5495         Type *VL0Ty = VL0->getOperand(j)->getType();
5496         Type *Ty = llvm::all_of(
5497                        VL, [VL0Ty](Value *V) { return VL0Ty == V->getType(); })
5498                        ? VL0Ty
5499                        : DL->getIndexType(cast<GetElementPtrInst>(VL0)
5500                                               ->getPointerOperandType()
5501                                               ->getScalarType());
5502         for (Value *&V : VL) {
5503           auto *CI = cast<ConstantInt>(V);
5504           V = ConstantExpr::getIntegerCast(CI, Ty,
5505                                            CI->getValue().isSignBitSet());
5506         }
5507         Value *OpVec = vectorizeTree(VL);
5508         OpVecs.push_back(OpVec);
5509       }
5510 
5511       Value *V = Builder.CreateGEP(
5512           cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs);
5513       if (Instruction *I = dyn_cast<Instruction>(V))
5514         V = propagateMetadata(I, E->Scalars);
5515 
5516       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5517       V = ShuffleBuilder.finalize(V);
5518 
5519       E->VectorizedValue = V;
5520       ++NumVectorInstructions;
5521 
5522       return V;
5523     }
5524     case Instruction::Call: {
5525       CallInst *CI = cast<CallInst>(VL0);
5526       setInsertPointAfterBundle(E);
5527 
      Intrinsic::ID IID = Intrinsic::not_intrinsic;
5529       if (Function *FI = CI->getCalledFunction())
5530         IID = FI->getIntrinsicID();
5531 
5532       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
5533 
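      // Choose between the vector intrinsic and a vectorized library function
      // based on their relative costs.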
5534       auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
5535       bool UseIntrinsic = ID != Intrinsic::not_intrinsic &&
5536                           VecCallCosts.first <= VecCallCosts.second;
5537 
5538       Value *ScalarArg = nullptr;
5539       std::vector<Value *> OpVecs;
5540       SmallVector<Type *, 2> TysForDecl =
5541           {FixedVectorType::get(CI->getType(), E->Scalars.size())};
5542       for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
5543         ValueList OpVL;
        // Some intrinsics have scalar arguments; such arguments must not be
        // vectorized.
5546         if (UseIntrinsic && hasVectorInstrinsicScalarOpd(IID, j)) {
5547           CallInst *CEI = cast<CallInst>(VL0);
5548           ScalarArg = CEI->getArgOperand(j);
5549           OpVecs.push_back(CEI->getArgOperand(j));
5550           if (hasVectorInstrinsicOverloadedScalarOpd(IID, j))
5551             TysForDecl.push_back(ScalarArg->getType());
5552           continue;
5553         }
5554 
5555         Value *OpVec = vectorizeTree(E->getOperand(j));
5556         LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
5557         OpVecs.push_back(OpVec);
5558       }
5559 
5560       Function *CF;
5561       if (!UseIntrinsic) {
5562         VFShape Shape =
5563             VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
5564                                   VecTy->getNumElements())),
5565                          false /*HasGlobalPred*/);
5566         CF = VFDatabase(*CI).getVectorizedFunction(Shape);
5567       } else {
5568         CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl);
5569       }
5570 
5571       SmallVector<OperandBundleDef, 1> OpBundles;
5572       CI->getOperandBundlesAsDefs(OpBundles);
5573       Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);
5574 
      // The scalar argument uses an in-tree scalar, so add the new vectorized
      // call to the ExternalUses list to make sure that an extract will be
      // generated in the future.
5578       if (ScalarArg && getTreeEntry(ScalarArg))
5579         ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));
5580 
5581       propagateIRFlags(V, E->Scalars, VL0);
5582       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5583       V = ShuffleBuilder.finalize(V);
5584 
5585       E->VectorizedValue = V;
5586       ++NumVectorInstructions;
5587       return V;
5588     }
5589     case Instruction::ShuffleVector: {
5590       assert(E->isAltShuffle() &&
5591              ((Instruction::isBinaryOp(E->getOpcode()) &&
5592                Instruction::isBinaryOp(E->getAltOpcode())) ||
5593               (Instruction::isCast(E->getOpcode()) &&
5594                Instruction::isCast(E->getAltOpcode()))) &&
5595              "Invalid Shuffle Vector Operand");
5596 
5597       Value *LHS = nullptr, *RHS = nullptr;
5598       if (Instruction::isBinaryOp(E->getOpcode())) {
5599         setInsertPointAfterBundle(E);
5600         LHS = vectorizeTree(E->getOperand(0));
5601         RHS = vectorizeTree(E->getOperand(1));
5602       } else {
5603         setInsertPointAfterBundle(E);
5604         LHS = vectorizeTree(E->getOperand(0));
5605       }
5606 
5607       if (E->VectorizedValue) {
5608         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
5609         return E->VectorizedValue;
5610       }
5611 
5612       Value *V0, *V1;
5613       if (Instruction::isBinaryOp(E->getOpcode())) {
5614         V0 = Builder.CreateBinOp(
5615             static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
5616         V1 = Builder.CreateBinOp(
5617             static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS);
5618       } else {
5619         V0 = Builder.CreateCast(
5620             static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy);
5621         V1 = Builder.CreateCast(
5622             static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy);
5623       }
5624 
5625       // Create shuffle to take alternate operations from the vector.
5626       // Also, gather up main and alt scalar ops to propagate IR flags to
5627       // each vector operation.
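      // For example (illustrative), with Sz == 2, main opcode 'add' and alt
      // opcode 'sub', the bundle {a0 + b0, a1 - b1} produces Mask == {0, 3}:
      // lane 0 is taken from the vector add and lane 1 from the vector sub.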
5628       ValueList OpScalars, AltScalars;
5629       unsigned Sz = E->Scalars.size();
5630       SmallVector<int> Mask(Sz);
5631       for (unsigned I = 0; I < Sz; ++I) {
5632         auto *OpInst = cast<Instruction>(E->Scalars[I]);
5633         assert(E->isOpcodeOrAlt(OpInst) && "Unexpected main/alternate opcode");
5634         if (OpInst->getOpcode() == E->getAltOpcode()) {
5635           Mask[I] = Sz + I;
5636           AltScalars.push_back(E->Scalars[I]);
5637         } else {
5638           Mask[I] = I;
5639           OpScalars.push_back(E->Scalars[I]);
5640         }
5641       }
5642 
5643       propagateIRFlags(V0, OpScalars);
5644       propagateIRFlags(V1, AltScalars);
5645 
5646       Value *V = Builder.CreateShuffleVector(V0, V1, Mask);
5647       if (Instruction *I = dyn_cast<Instruction>(V))
5648         V = propagateMetadata(I, E->Scalars);
5649       ShuffleBuilder.addMask(E->ReuseShuffleIndices);
5650       V = ShuffleBuilder.finalize(V);
5651 
5652       E->VectorizedValue = V;
5653       ++NumVectorInstructions;
5654 
5655       return V;
5656     }
5657     default:
5658     llvm_unreachable("unknown inst");
5659   }
5660   return nullptr;
5661 }
5662 
5663 Value *BoUpSLP::vectorizeTree() {
5664   ExtraValueToDebugLocsMap ExternallyUsedValues;
5665   return vectorizeTree(ExternallyUsedValues);
5666 }
5667 
5668 Value *
5669 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
5670   // All blocks must be scheduled before any instructions are inserted.
5671   for (auto &BSIter : BlocksSchedules) {
5672     scheduleBlock(BSIter.second.get());
5673   }
5674 
5675   Builder.SetInsertPoint(&F->getEntryBlock().front());
5676   auto *VectorRoot = vectorizeTree(VectorizableTree[0].get());
5677 
5678   // If the vectorized tree can be rewritten in a smaller type, we truncate the
5679   // vectorized root. InstCombine will then rewrite the entire expression. We
5680   // sign extend the extracted values below.
5681   auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
5682   if (MinBWs.count(ScalarRoot)) {
5683     if (auto *I = dyn_cast<Instruction>(VectorRoot)) {
      // If the root is a PHI node, insert past the block's PHI nodes (at the
      // first insertion point); otherwise insert right after the root.
5686       if (isa<PHINode>(I))
5687         Builder.SetInsertPoint(&*I->getParent()->getFirstInsertionPt());
5688       else
5689         Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
5690     }
5691     auto BundleWidth = VectorizableTree[0]->Scalars.size();
5692     auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
5693     auto *VecTy = FixedVectorType::get(MinTy, BundleWidth);
5694     auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
5695     VectorizableTree[0]->VectorizedValue = Trunc;
5696   }
5697 
5698   LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
5699                     << " values .\n");
5700 
5701   // Extract all of the elements with the external uses.
5702   for (const auto &ExternalUse : ExternalUses) {
5703     Value *Scalar = ExternalUse.Scalar;
5704     llvm::User *User = ExternalUse.User;
5705 
    // Skip users that we have already RAUW'd. This happens when one
    // instruction has multiple uses of the same value.
5708     if (User && !is_contained(Scalar->users(), User))
5709       continue;
5710     TreeEntry *E = getTreeEntry(Scalar);
5711     assert(E && "Invalid scalar");
5712     assert(E->State != TreeEntry::NeedToGather &&
5713            "Extracting from a gather list");
5714 
5715     Value *Vec = E->VectorizedValue;
5716     assert(Vec && "Can't find vectorizable value");
5717 
5718     Value *Lane = Builder.getInt32(ExternalUse.Lane);
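    // Helper: extract Scalar's lane from Vec (reusing an existing extract
    // where possible) and, if the tree was computed in a narrower type,
    // extend the result back to Scalar's original type.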
5719     auto ExtractAndExtendIfNeeded = [&](Value *Vec) {
5720       if (Scalar->getType() != Vec->getType()) {
5721         Value *Ex;
5722         // "Reuse" the existing extract to improve final codegen.
5723         if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) {
5724           Ex = Builder.CreateExtractElement(ES->getOperand(0),
5725                                             ES->getOperand(1));
5726         } else {
5727           Ex = Builder.CreateExtractElement(Vec, Lane);
5728         }
5729         // If necessary, sign-extend or zero-extend ScalarRoot
5730         // to the larger type.
5731         if (!MinBWs.count(ScalarRoot))
5732           return Ex;
5733         if (MinBWs[ScalarRoot].second)
5734           return Builder.CreateSExt(Ex, Scalar->getType());
5735         return Builder.CreateZExt(Ex, Scalar->getType());
5736       }
5737       assert(isa<FixedVectorType>(Scalar->getType()) &&
5738              isa<InsertElementInst>(Scalar) &&
5739              "In-tree scalar of vector type is not insertelement?");
5740       return Vec;
5741     };
    // If User == nullptr, the Scalar is used as an extra argument. Generate
    // an ExtractElement instruction and update the record for this scalar in
    // ExternallyUsedValues.
5745     if (!User) {
5746       assert(ExternallyUsedValues.count(Scalar) &&
5747              "Scalar with nullptr as an external user must be registered in "
5748              "ExternallyUsedValues map");
5749       if (auto *VecI = dyn_cast<Instruction>(Vec)) {
5750         Builder.SetInsertPoint(VecI->getParent(),
5751                                std::next(VecI->getIterator()));
5752       } else {
5753         Builder.SetInsertPoint(&F->getEntryBlock().front());
5754       }
5755       Value *NewInst = ExtractAndExtendIfNeeded(Vec);
5756       CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
5757       auto &NewInstLocs = ExternallyUsedValues[NewInst];
5758       auto It = ExternallyUsedValues.find(Scalar);
5759       assert(It != ExternallyUsedValues.end() &&
5760              "Externally used scalar is not found in ExternallyUsedValues");
5761       NewInstLocs.append(It->second);
5762       ExternallyUsedValues.erase(Scalar);
5763       // Required to update internally referenced instructions.
5764       Scalar->replaceAllUsesWith(NewInst);
5765       continue;
5766     }
5767 
5768     // Generate extracts for out-of-tree users.
5769     // Find the insertion point for the extractelement lane.
5770     if (auto *VecI = dyn_cast<Instruction>(Vec)) {
5771       if (PHINode *PH = dyn_cast<PHINode>(User)) {
5772         for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
5773           if (PH->getIncomingValue(i) == Scalar) {
5774             Instruction *IncomingTerminator =
5775                 PH->getIncomingBlock(i)->getTerminator();
5776             if (isa<CatchSwitchInst>(IncomingTerminator)) {
5777               Builder.SetInsertPoint(VecI->getParent(),
5778                                      std::next(VecI->getIterator()));
5779             } else {
5780               Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
5781             }
5782             Value *NewInst = ExtractAndExtendIfNeeded(Vec);
5783             CSEBlocks.insert(PH->getIncomingBlock(i));
5784             PH->setOperand(i, NewInst);
5785           }
5786         }
5787       } else {
5788         Builder.SetInsertPoint(cast<Instruction>(User));
5789         Value *NewInst = ExtractAndExtendIfNeeded(Vec);
5790         CSEBlocks.insert(cast<Instruction>(User)->getParent());
5791         User->replaceUsesOfWith(Scalar, NewInst);
5792       }
5793     } else {
5794       Builder.SetInsertPoint(&F->getEntryBlock().front());
5795       Value *NewInst = ExtractAndExtendIfNeeded(Vec);
5796       CSEBlocks.insert(&F->getEntryBlock());
5797       User->replaceUsesOfWith(Scalar, NewInst);
5798     }
5799 
5800     LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
5801   }
5802 
5803   // For each vectorized value:
5804   for (auto &TEPtr : VectorizableTree) {
5805     TreeEntry *Entry = TEPtr.get();
5806 
5807     // No need to handle users of gathered values.
5808     if (Entry->State == TreeEntry::NeedToGather)
5809       continue;
5810 
5811     assert(Entry->VectorizedValue && "Can't find vectorizable value");
5812 
5813     // For each lane:
5814     for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
5815       Value *Scalar = Entry->Scalars[Lane];
5816 
5817 #ifndef NDEBUG
5818       Type *Ty = Scalar->getType();
5819       if (!Ty->isVoidTy()) {
5820         for (User *U : Scalar->users()) {
5821           LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
5822 
5823           // It is legal to delete users in the ignorelist.
5824           assert((getTreeEntry(U) || is_contained(UserIgnoreList, U) ||
5825                   (isa_and_nonnull<Instruction>(U) &&
5826                    isDeleted(cast<Instruction>(U)))) &&
5827                  "Deleting out-of-tree value");
5828         }
5829       }
5830 #endif
5831       LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
5832       eraseInstruction(cast<Instruction>(Scalar));
5833     }
5834   }
5835 
5836   Builder.ClearInsertionPoint();
5837   InstrElementSize.clear();
5838 
5839   return VectorizableTree[0]->VectorizedValue;
5840 }
5841 
5842 void BoUpSLP::optimizeGatherSequence() {
5843   LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
5844                     << " gather sequences instructions.\n");
5845   // LICM InsertElementInst sequences.
5846   for (Instruction *I : GatherSeq) {
5847     if (isDeleted(I))
5848       continue;
5849 
5850     // Check if this block is inside a loop.
5851     Loop *L = LI->getLoopFor(I->getParent());
5852     if (!L)
5853       continue;
5854 
5855     // Check if it has a preheader.
5856     BasicBlock *PreHeader = L->getLoopPreheader();
5857     if (!PreHeader)
5858       continue;
5859 
    // If the vector or the element that we insert into it are
    // instructions that are defined inside the loop, then we can't
    // hoist this instruction.
5863     auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
5864     auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
5865     if (Op0 && L->contains(Op0))
5866       continue;
5867     if (Op1 && L->contains(Op1))
5868       continue;
5869 
5870     // We can hoist this instruction. Move it to the pre-header.
5871     I->moveBefore(PreHeader->getTerminator());
5872   }
5873 
5874   // Make a list of all reachable blocks in our CSE queue.
5875   SmallVector<const DomTreeNode *, 8> CSEWorkList;
5876   CSEWorkList.reserve(CSEBlocks.size());
5877   for (BasicBlock *BB : CSEBlocks)
5878     if (DomTreeNode *N = DT->getNode(BB)) {
5879       assert(DT->isReachableFromEntry(N));
5880       CSEWorkList.push_back(N);
5881     }
5882 
5883   // Sort blocks by domination. This ensures we visit a block after all blocks
5884   // dominating it are visited.
5885   llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) {
5886     assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) &&
5887            "Different nodes should have different DFS numbers");
5888     return A->getDFSNumIn() < B->getDFSNumIn();
5889   });
5890 
5891   // Perform O(N^2) search over the gather sequences and merge identical
5892   // instructions. TODO: We can further optimize this scan if we split the
5893   // instructions into different buckets based on the insert lane.
5894   SmallVector<Instruction *, 16> Visited;
5895   for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
5896     assert(*I &&
5897            (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
5898            "Worklist not sorted properly!");
5899     BasicBlock *BB = (*I)->getBlock();
5900     // For all instructions in blocks containing gather sequences:
5901     for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
5902       Instruction *In = &*it++;
5903       if (isDeleted(In))
5904         continue;
5905       if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In) &&
5906           !isa<ShuffleVectorInst>(In))
5907         continue;
5908 
5909       // Check if we can replace this instruction with any of the
5910       // visited instructions.
5911       for (Instruction *v : Visited) {
5912         if (In->isIdenticalTo(v) &&
5913             DT->dominates(v->getParent(), In->getParent())) {
5914           In->replaceAllUsesWith(v);
5915           eraseInstruction(In);
5916           In = nullptr;
5917           break;
5918         }
5919       }
5920       if (In) {
5921         assert(!is_contained(Visited, In));
5922         Visited.push_back(In);
5923       }
5924     }
5925   }
5926   CSEBlocks.clear();
5927   GatherSeq.clear();
5928 }
5929 
// Groups the instructions into a bundle (which is then a single scheduling
// entity) and schedules instructions until the bundle gets ready.
5932 Optional<BoUpSLP::ScheduleData *>
5933 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
5934                                             const InstructionsState &S) {
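  // PHIs and insertelements are never scheduled; they keep their program
  // positions, so report success without forming a bundle.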
5935   if (isa<PHINode>(S.OpValue) || isa<InsertElementInst>(S.OpValue))
5936     return nullptr;
5937 
5938   // Initialize the instruction bundle.
5939   Instruction *OldScheduleEnd = ScheduleEnd;
5940   ScheduleData *PrevInBundle = nullptr;
5941   ScheduleData *Bundle = nullptr;
5942   bool ReSchedule = false;
5943   LLVM_DEBUG(dbgs() << "SLP:  bundle: " << *S.OpValue << "\n");
5944 
5945   auto &&TryScheduleBundle = [this, OldScheduleEnd, SLP](bool ReSchedule,
5946                                                          ScheduleData *Bundle) {
5947     // The scheduling region got new instructions at the lower end (or it is a
5948     // new region for the first bundle). This makes it necessary to
5949     // recalculate all dependencies.
5950     // It is seldom that this needs to be done a second time after adding the
5951     // initial bundle to the region.
5952     if (ScheduleEnd != OldScheduleEnd) {
5953       for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode())
5954         doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); });
5955       ReSchedule = true;
5956     }
5957     if (ReSchedule) {
5958       resetSchedule();
5959       initialFillReadyList(ReadyInsts);
5960     }
5961     if (Bundle) {
5962       LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle
5963                         << " in block " << BB->getName() << "\n");
5964       calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP);
5965     }
5966 
5967     // Now try to schedule the new bundle or (if no bundle) just calculate
5968     // dependencies. As soon as the bundle is "ready" it means that there are no
    // cyclic dependencies and we can schedule it. Note that it's important
    // that we don't "schedule" the bundle yet (see cancelScheduling).
5971     while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) &&
5972            !ReadyInsts.empty()) {
5973       ScheduleData *Picked = ReadyInsts.pop_back_val();
5974       if (Picked->isSchedulingEntity() && Picked->isReady())
5975         schedule(Picked, ReadyInsts);
5976     }
5977   };
5978 
5979   // Make sure that the scheduling region contains all
5980   // instructions of the bundle.
5981   for (Value *V : VL) {
5982     if (!extendSchedulingRegion(V, S)) {
      // If the scheduling region got new instructions at the lower end (or it
      // is a new region for the first bundle), it is necessary to recalculate
      // all dependencies before giving up. Otherwise the compiler may crash
      // trying to incorrectly calculate dependencies and emit instructions in
      // the wrong order at the actual scheduling.
5989       TryScheduleBundle(/*ReSchedule=*/false, nullptr);
5990       return None;
5991     }
5992   }
5993 
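  // Link the bundle members into a chain headed by the first member and
  // accumulate their unscheduled dependencies on that head.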
5994   for (Value *V : VL) {
5995     ScheduleData *BundleMember = getScheduleData(V);
5996     assert(BundleMember &&
5997            "no ScheduleData for bundle member (maybe not in same basic block)");
5998     if (BundleMember->IsScheduled) {
5999       // A bundle member was scheduled as single instruction before and now
6000       // needs to be scheduled as part of the bundle. We just get rid of the
6001       // existing schedule.
6002       LLVM_DEBUG(dbgs() << "SLP:  reset schedule because " << *BundleMember
6003                         << " was already scheduled\n");
6004       ReSchedule = true;
6005     }
6006     assert(BundleMember->isSchedulingEntity() &&
6007            "bundle member already part of other bundle");
6008     if (PrevInBundle) {
6009       PrevInBundle->NextInBundle = BundleMember;
6010     } else {
6011       Bundle = BundleMember;
6012     }
6013     BundleMember->UnscheduledDepsInBundle = 0;
6014     Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;
6015 
6016     // Group the instructions to a bundle.
6017     BundleMember->FirstInBundle = Bundle;
6018     PrevInBundle = BundleMember;
6019   }
6020   assert(Bundle && "Failed to find schedule bundle");
6021   TryScheduleBundle(ReSchedule, Bundle);
6022   if (!Bundle->isReady()) {
6023     cancelScheduling(VL, S.OpValue);
6024     return None;
6025   }
6026   return Bundle;
6027 }
6028 
6029 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
6030                                                 Value *OpValue) {
6031   if (isa<PHINode>(OpValue) || isa<InsertElementInst>(OpValue))
6032     return;
6033 
6034   ScheduleData *Bundle = getScheduleData(OpValue);
6035   LLVM_DEBUG(dbgs() << "SLP:  cancel scheduling of " << *Bundle << "\n");
6036   assert(!Bundle->IsScheduled &&
6037          "Can't cancel bundle which is already scheduled");
6038   assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
6039          "tried to unbundle something which is not a bundle");
6040 
6041   // Un-bundle: make single instructions out of the bundle.
6042   ScheduleData *BundleMember = Bundle;
6043   while (BundleMember) {
6044     assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
6045     BundleMember->FirstInBundle = BundleMember;
6046     ScheduleData *Next = BundleMember->NextInBundle;
6047     BundleMember->NextInBundle = nullptr;
6048     BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
6049     if (BundleMember->UnscheduledDepsInBundle == 0) {
6050       ReadyInsts.insert(BundleMember);
6051     }
6052     BundleMember = Next;
6053   }
6054 }
6055 
6056 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
6057   // Allocate a new ScheduleData for the instruction.
6058   if (ChunkPos >= ChunkSize) {
6059     ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize));
6060     ChunkPos = 0;
6061   }
6062   return &(ScheduleDataChunks.back()[ChunkPos++]);
6063 }
6064 
6065 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V,
6066                                                       const InstructionsState &S) {
6067   if (getScheduleData(V, isOneOf(S, V)))
6068     return true;
6069   Instruction *I = dyn_cast<Instruction>(V);
6070   assert(I && "bundle member must be an instruction");
6071   assert(!isa<PHINode>(I) && !isa<InsertElementInst>(I) &&
6072          "phi nodes/insertelements don't need to be scheduled");
  auto &&CheckScheduleForI = [this, &S](Instruction *I) -> bool {
6074     ScheduleData *ISD = getScheduleData(I);
6075     if (!ISD)
6076       return false;
6077     assert(isInSchedulingRegion(ISD) &&
6078            "ScheduleData not in scheduling region");
6079     ScheduleData *SD = allocateScheduleDataChunks();
6080     SD->Inst = I;
6081     SD->init(SchedulingRegionID, S.OpValue);
6082     ExtraScheduleDataMap[I][S.OpValue] = SD;
6083     return true;
6084   };
  if (CheckScheduleForI(I))
6086     return true;
6087   if (!ScheduleStart) {
6088     // It's the first instruction in the new region.
6089     initScheduleData(I, I->getNextNode(), nullptr, nullptr);
6090     ScheduleStart = I;
6091     ScheduleEnd = I->getNextNode();
6092     if (isOneOf(S, I) != I)
      CheckScheduleForI(I);
6094     assert(ScheduleEnd && "tried to vectorize a terminator?");
6095     LLVM_DEBUG(dbgs() << "SLP:  initialize schedule region to " << *I << "\n");
6096     return true;
6097   }
6098   // Search up and down at the same time, because we don't know if the new
6099   // instruction is above or below the existing scheduling region.
6100   BasicBlock::reverse_iterator UpIter =
6101       ++ScheduleStart->getIterator().getReverse();
6102   BasicBlock::reverse_iterator UpperEnd = BB->rend();
6103   BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
6104   BasicBlock::iterator LowerEnd = BB->end();
6105   while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I &&
6106          &*DownIter != I) {
6107     if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
6108       LLVM_DEBUG(dbgs() << "SLP:  exceeded schedule region size limit\n");
6109       return false;
6110     }
6111 
6112     ++UpIter;
6113     ++DownIter;
6114   }
6115   if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) {
6116     assert(I->getParent() == ScheduleStart->getParent() &&
6117            "Instruction is in wrong basic block.");
6118     initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
6119     ScheduleStart = I;
6120     if (isOneOf(S, I) != I)
      CheckScheduleForI(I);
6122     LLVM_DEBUG(dbgs() << "SLP:  extend schedule region start to " << *I
6123                       << "\n");
6124     return true;
6125   }
  assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) &&
         "Expected to reach the top of the basic block or the instruction at "
         "the lower end.");
6129   assert(I->getParent() == ScheduleEnd->getParent() &&
6130          "Instruction is in wrong basic block.");
6131   initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
6132                    nullptr);
6133   ScheduleEnd = I->getNextNode();
6134   if (isOneOf(S, I) != I)
    CheckScheduleForI(I);
6136   assert(ScheduleEnd && "tried to vectorize a terminator?");
6137   LLVM_DEBUG(dbgs() << "SLP:  extend schedule region end to " << *I << "\n");
6138   return true;
6139 }
6140 
6141 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
6142                                                 Instruction *ToI,
6143                                                 ScheduleData *PrevLoadStore,
6144                                                 ScheduleData *NextLoadStore) {
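  // Create (or reuse) ScheduleData for each instruction in the half-open
  // range [FromI, ToI) and link the memory-accessing instructions into the
  // region's load/store chain.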
6145   ScheduleData *CurrentLoadStore = PrevLoadStore;
6146   for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
6147     ScheduleData *SD = ScheduleDataMap[I];
6148     if (!SD) {
6149       SD = allocateScheduleDataChunks();
6150       ScheduleDataMap[I] = SD;
6151       SD->Inst = I;
6152     }
6153     assert(!isInSchedulingRegion(SD) &&
6154            "new ScheduleData already in scheduling region");
6155     SD->init(SchedulingRegionID, I);
6156 
6157     if (I->mayReadOrWriteMemory() &&
6158         (!isa<IntrinsicInst>(I) ||
6159          (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect &&
6160           cast<IntrinsicInst>(I)->getIntrinsicID() !=
6161               Intrinsic::pseudoprobe))) {
6162       // Update the linked list of memory accessing instructions.
6163       if (CurrentLoadStore) {
6164         CurrentLoadStore->NextLoadStore = SD;
6165       } else {
6166         FirstLoadStoreInRegion = SD;
6167       }
6168       CurrentLoadStore = SD;
6169     }
6170   }
6171   if (NextLoadStore) {
6172     if (CurrentLoadStore)
6173       CurrentLoadStore->NextLoadStore = NextLoadStore;
6174   } else {
6175     LastLoadStoreInRegion = CurrentLoadStore;
6176   }
6177 }
6178 
6179 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
6180                                                      bool InsertInReadyList,
6181                                                      BoUpSLP *SLP) {
6182   assert(SD->isSchedulingEntity());
6183 
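  // Worklist-driven: computing the dependencies of one bundle may discover
  // destination bundles whose dependencies are not valid yet; those are
  // queued for processing as well.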
6184   SmallVector<ScheduleData *, 10> WorkList;
6185   WorkList.push_back(SD);
6186 
6187   while (!WorkList.empty()) {
6188     ScheduleData *SD = WorkList.pop_back_val();
6189 
6190     ScheduleData *BundleMember = SD;
6191     while (BundleMember) {
6192       assert(isInSchedulingRegion(BundleMember));
6193       if (!BundleMember->hasValidDependencies()) {
6194 
6195         LLVM_DEBUG(dbgs() << "SLP:       update deps of " << *BundleMember
6196                           << "\n");
6197         BundleMember->Dependencies = 0;
6198         BundleMember->resetUnscheduledDeps();
6199 
6200         // Handle def-use chain dependencies.
6201         if (BundleMember->OpValue != BundleMember->Inst) {
6202           ScheduleData *UseSD = getScheduleData(BundleMember->Inst);
6203           if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
6204             BundleMember->Dependencies++;
6205             ScheduleData *DestBundle = UseSD->FirstInBundle;
6206             if (!DestBundle->IsScheduled)
6207               BundleMember->incrementUnscheduledDeps(1);
6208             if (!DestBundle->hasValidDependencies())
6209               WorkList.push_back(DestBundle);
6210           }
6211         } else {
6212           for (User *U : BundleMember->Inst->users()) {
6213             if (isa<Instruction>(U)) {
6214               ScheduleData *UseSD = getScheduleData(U);
6215               if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
6216                 BundleMember->Dependencies++;
6217                 ScheduleData *DestBundle = UseSD->FirstInBundle;
6218                 if (!DestBundle->IsScheduled)
6219                   BundleMember->incrementUnscheduledDeps(1);
6220                 if (!DestBundle->hasValidDependencies())
6221                   WorkList.push_back(DestBundle);
6222               }
6223             } else {
              // It is unclear whether this can ever happen, but be safe: the
              // unresolved dependency keeps the instruction/bundle from ever
              // being scheduled, which eventually disables vectorization.
6227               BundleMember->Dependencies++;
6228               BundleMember->incrementUnscheduledDeps(1);
6229             }
6230           }
6231         }
6232 
6233         // Handle the memory dependencies.
6234         ScheduleData *DepDest = BundleMember->NextLoadStore;
6235         if (DepDest) {
6236           Instruction *SrcInst = BundleMember->Inst;
6237           MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
6238           bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
6239           unsigned numAliased = 0;
6240           unsigned DistToSrc = 1;
6241 
6242           while (DepDest) {
6243             assert(isInSchedulingRegion(DepDest));
6244 
6245             // We have two limits to reduce the complexity:
6246             // 1) AliasedCheckLimit: It's a small limit to reduce calls to
6247             //    SLP->isAliased (which is the expensive part in this loop).
6248             // 2) MaxMemDepDistance: It's for very large blocks and it aborts
6249             //    the whole loop (even if the loop is fast, it's quadratic).
6250             //    It's important for the loop break condition (see below) to
6251             //    check this limit even between two read-only instructions.
6252             if (DistToSrc >= MaxMemDepDistance ||
6253                     ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
6254                      (numAliased >= AliasedCheckLimit ||
6255                       SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
6256 
6257               // We increment the counter only if the locations are aliased
6258               // (instead of counting all alias checks). This gives a better
6259               // balance between reduced runtime and accurate dependencies.
6260               numAliased++;
6261 
6262               DepDest->MemoryDependencies.push_back(BundleMember);
6263               BundleMember->Dependencies++;
6264               ScheduleData *DestBundle = DepDest->FirstInBundle;
6265               if (!DestBundle->IsScheduled) {
6266                 BundleMember->incrementUnscheduledDeps(1);
6267               }
6268               if (!DestBundle->hasValidDependencies()) {
6269                 WorkList.push_back(DestBundle);
6270               }
6271             }
6272             DepDest = DepDest->NextLoadStore;
6273 
6274             // Example, explaining the loop break condition: Let's assume our
6275             // starting instruction is i0 and MaxMemDepDistance = 3.
6276             //
6277             //                      +--------v--v--v
6278             //             i0,i1,i2,i3,i4,i5,i6,i7,i8
6279             //             +--------^--^--^
6280             //
6281             // MaxMemDepDistance let us stop alias-checking at i3 and we add
6282             // dependencies from i0 to i3,i4,.. (even if they are not aliased).
6283             // Previously we already added dependencies from i3 to i6,i7,i8
6284             // (because of MaxMemDepDistance). As we added a dependency from
6285             // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
6286             // and we can abort this loop at i6.
6287             if (DistToSrc >= 2 * MaxMemDepDistance)
6288               break;
6289             DistToSrc++;
6290           }
6291         }
6292       }
6293       BundleMember = BundleMember->NextInBundle;
6294     }
6295     if (InsertInReadyList && SD->isReady()) {
6296       ReadyInsts.push_back(SD);
6297       LLVM_DEBUG(dbgs() << "SLP:     gets ready on update: " << *SD->Inst
6298                         << "\n");
6299     }
6300   }
6301 }
6302 
6303 void BoUpSLP::BlockScheduling::resetSchedule() {
6304   assert(ScheduleStart &&
6305          "tried to reset schedule on block which has not been scheduled");
6306   for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
6307     doForAllOpcodes(I, [&](ScheduleData *SD) {
6308       assert(isInSchedulingRegion(SD) &&
6309              "ScheduleData not in scheduling region");
6310       SD->IsScheduled = false;
6311       SD->resetUnscheduledDeps();
6312     });
6313   }
6314   ReadyInsts.clear();
6315 }
6316 
6317 void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
6318   if (!BS->ScheduleStart)
6319     return;
6320 
6321   LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
6322 
6323   BS->resetSchedule();
6324 
6325   // For the real scheduling we use a more sophisticated ready-list: it is
6326   // sorted by the original instruction location. This lets the final schedule
  // be as close as possible to the original instruction order.
6328   struct ScheduleDataCompare {
6329     bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
6330       return SD2->SchedulingPriority < SD1->SchedulingPriority;
6331     }
6332   };
6333   std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
6334 
6335   // Ensure that all dependency data is updated and fill the ready-list with
6336   // initial instructions.
6337   int Idx = 0;
6338   int NumToSchedule = 0;
6339   for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
6340        I = I->getNextNode()) {
6341     BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) {
6342       assert((isa<InsertElementInst>(SD->Inst) ||
6343               SD->isPartOfBundle() == (getTreeEntry(SD->Inst) != nullptr)) &&
6344              "scheduler and vectorizer bundle mismatch");
6345       SD->FirstInBundle->SchedulingPriority = Idx++;
6346       if (SD->isSchedulingEntity()) {
6347         BS->calculateDependencies(SD, false, this);
6348         NumToSchedule++;
6349       }
6350     });
6351   }
6352   BS->initialFillReadyList(ReadyInsts);
6353 
6354   Instruction *LastScheduledInst = BS->ScheduleEnd;
6355 
6356   // Do the "real" scheduling.
6357   while (!ReadyInsts.empty()) {
6358     ScheduleData *picked = *ReadyInsts.begin();
6359     ReadyInsts.erase(ReadyInsts.begin());
6360 
6361     // Move the scheduled instruction(s) to their dedicated places, if not
6362     // there yet.
6363     ScheduleData *BundleMember = picked;
6364     while (BundleMember) {
6365       Instruction *pickedInst = BundleMember->Inst;
6366       if (pickedInst->getNextNode() != LastScheduledInst) {
6367         BS->BB->getInstList().remove(pickedInst);
6368         BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
6369                                      pickedInst);
6370       }
6371       LastScheduledInst = pickedInst;
6372       BundleMember = BundleMember->NextInBundle;
6373     }
6374 
6375     BS->schedule(picked, ReadyInsts);
6376     NumToSchedule--;
6377   }
6378   assert(NumToSchedule == 0 && "could not schedule all instructions");
6379 
6380   // Avoid duplicate scheduling of the block.
6381   BS->ScheduleStart = nullptr;
6382 }
6383 
6384 unsigned BoUpSLP::getVectorElementSize(Value *V) {
6385   // If V is a store, just return the width of the stored value (or value
6386   // truncated just before storing) without traversing the expression tree.
6387   // This is the common case.
6388   if (auto *Store = dyn_cast<StoreInst>(V)) {
6389     if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand()))
6390       return DL->getTypeSizeInBits(Trunc->getSrcTy());
6391     return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
6392   }
6393 
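  // For an insertelement, base the element size on the inserted scalar
  // operand.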
6394   if (auto *IEI = dyn_cast<InsertElementInst>(V))
6395     return getVectorElementSize(IEI->getOperand(1));
6396 
6397   auto E = InstrElementSize.find(V);
6398   if (E != InstrElementSize.end())
6399     return E->second;
6400 
6401   // If V is not a store, we can traverse the expression tree to find loads
6402   // that feed it. The type of the loaded value may indicate a more suitable
6403   // width than V's type. We want to base the vector element size on the width
6404   // of memory operations where possible.
6405   SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist;
6406   SmallPtrSet<Instruction *, 16> Visited;
6407   if (auto *I = dyn_cast<Instruction>(V)) {
6408     Worklist.emplace_back(I, I->getParent());
6409     Visited.insert(I);
6410   }
6411 
6412   // Traverse the expression tree in bottom-up order looking for loads. If we
6413   // encounter an instruction we don't yet handle, we give up.
6414   auto Width = 0u;
6415   while (!Worklist.empty()) {
6416     Instruction *I;
6417     BasicBlock *Parent;
6418     std::tie(I, Parent) = Worklist.pop_back_val();
6419 
6420     // We should only be looking at scalar instructions here. If the current
6421     // instruction has a vector type, skip.
6422     auto *Ty = I->getType();
6423     if (isa<VectorType>(Ty))
6424       continue;
6425 
    // If the current instruction is a load, extractelement, or extractvalue,
    // update Width to reflect the width of the accessed value.
6428     if (isa<LoadInst>(I) || isa<ExtractElementInst>(I) ||
6429         isa<ExtractValueInst>(I))
6430       Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty));
6431 
    // Otherwise, we need to visit the operands of the instruction. We only
    // handle the interesting cases from buildTree here. If an operand is an
    // instruction we haven't visited yet that comes from the same basic block
    // as the user (or the user is a PHI node), we add it to the worklist.
6436     else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
6437              isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I) ||
6438              isa<UnaryOperator>(I)) {
6439       for (Use &U : I->operands())
6440         if (auto *J = dyn_cast<Instruction>(U.get()))
6441           if (Visited.insert(J).second &&
6442               (isa<PHINode>(I) || J->getParent() == Parent))
6443             Worklist.emplace_back(J, J->getParent());
6444     } else {
6445       break;
6446     }
6447   }
6448 
6449   // If we didn't encounter a memory access in the expression tree, or if we
6450   // gave up for some reason, just return the width of V. Otherwise, return the
6451   // maximum width we found.
6452   if (!Width) {
6453     if (auto *CI = dyn_cast<CmpInst>(V))
6454       V = CI->getOperand(0);
6455     Width = DL->getTypeSizeInBits(V->getType());
6456   }
6457 
6458   for (Instruction *I : Visited)
6459     InstrElementSize[I] = Width;
6460 
6461   return Width;
6462 }
6463 
6464 // Determine if a value V in a vectorizable expression Expr can be demoted to a
6465 // smaller type with a truncation. We collect the values that will be demoted
6466 // in ToDemote and additional roots that require investigating in Roots.
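// For example (an illustrative case): in a chain like add(zext(x), zext(y))
// where every value has a single use inside Expr, the add and both
// extensions can all be collected for demotion.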
6467 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
6468                                   SmallVectorImpl<Value *> &ToDemote,
6469                                   SmallVectorImpl<Value *> &Roots) {
6470   // We can always demote constants.
6471   if (isa<Constant>(V)) {
6472     ToDemote.push_back(V);
6473     return true;
6474   }
6475 
6476   // If the value is not an instruction in the expression with only one use, it
6477   // cannot be demoted.
6478   auto *I = dyn_cast<Instruction>(V);
6479   if (!I || !I->hasOneUse() || !Expr.count(I))
6480     return false;
6481 
6482   switch (I->getOpcode()) {
6483 
6484   // We can always demote truncations and extensions. Since truncations can
6485   // seed additional demotion, we save the truncated value.
6486   case Instruction::Trunc:
6487     Roots.push_back(I->getOperand(0));
6488     break;
6489   case Instruction::ZExt:
6490   case Instruction::SExt:
6491     if (isa<ExtractElementInst>(I->getOperand(0)) ||
6492         isa<InsertElementInst>(I->getOperand(0)))
6493       return false;
6494     break;
6495 
6496   // We can demote certain binary operations if we can demote both of their
6497   // operands.
6498   case Instruction::Add:
6499   case Instruction::Sub:
6500   case Instruction::Mul:
6501   case Instruction::And:
6502   case Instruction::Or:
6503   case Instruction::Xor:
6504     if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
6505         !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
6506       return false;
6507     break;
6508 
6509   // We can demote selects if we can demote their true and false values.
6510   case Instruction::Select: {
6511     SelectInst *SI = cast<SelectInst>(I);
6512     if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
6513         !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
6514       return false;
6515     break;
6516   }
6517 
  // We can demote phis if we can demote all their incoming operands. Note
  // that we don't need to worry about cycles since we ensure a single use
  // above.
6520   case Instruction::PHI: {
6521     PHINode *PN = cast<PHINode>(I);
6522     for (Value *IncValue : PN->incoming_values())
6523       if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
6524         return false;
6525     break;
6526   }
6527 
6528   // Otherwise, conservatively give up.
6529   default:
6530     return false;
6531   }
6532 
6533   // Record the value that we can demote.
6534   ToDemote.push_back(V);
6535   return true;
6536 }
6537 
6538 void BoUpSLP::computeMinimumValueSizes() {
6539   // If there are no external uses, the expression tree must be rooted by a
6540   // store. We can't demote in-memory values, so there is nothing to do here.
6541   if (ExternalUses.empty())
6542     return;
6543 
6544   // We only attempt to truncate integer expressions.
6545   auto &TreeRoot = VectorizableTree[0]->Scalars;
6546   auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
6547   if (!TreeRootIT)
6548     return;
6549 
6550   // If the expression is not rooted by a store, these roots should have
6551   // external uses. We will rely on InstCombine to rewrite the expression in
6552   // the narrower type. However, InstCombine only rewrites single-use values.
6553   // This means that if a tree entry other than a root is used externally, it
6554   // must have multiple uses and InstCombine will not rewrite it. The code
6555   // below ensures that only the roots are used externally.
6556   SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
6557   for (auto &EU : ExternalUses)
6558     if (!Expr.erase(EU.Scalar))
6559       return;
6560   if (!Expr.empty())
6561     return;
6562 
6563   // Collect the scalar values of the vectorizable expression. We will use this
6564   // context to determine which values can be demoted. If we see a truncation,
6565   // we mark it as seeding another demotion.
6566   for (auto &EntryPtr : VectorizableTree)
6567     Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end());
6568 
6569   // Ensure the roots of the vectorizable tree don't form a cycle. They must
6570   // have a single external user that is not in the vectorizable tree.
6571   for (auto *Root : TreeRoot)
6572     if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
6573       return;
6574 
6575   // Conservatively determine if we can actually truncate the roots of the
6576   // expression. Collect the values that can be demoted in ToDemote and
6577   // additional roots that require investigating in Roots.
6578   SmallVector<Value *, 32> ToDemote;
6579   SmallVector<Value *, 4> Roots;
6580   for (auto *Root : TreeRoot)
6581     if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
6582       return;
6583 
6584   // The maximum bit width required to represent all the values that can be
6585   // demoted without loss of precision. It would be safe to truncate the roots
6586   // of the expression to this width.
6587   auto MaxBitWidth = 8u;
6588 
6589   // We first check if all the bits of the roots are demanded. If they're not,
6590   // we can truncate the roots to this narrower type.
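  // For example (illustrative), if each root's only user is a trunc to i8,
  // only the low 8 bits are demanded and MaxBitWidth stays at 8.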
6591   for (auto *Root : TreeRoot) {
6592     auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
6593     MaxBitWidth = std::max<unsigned>(
6594         Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
6595   }
6596 
6597   // True if the roots can be zero-extended back to their original type, rather
6598   // than sign-extended. We know that if the leading bits are not demanded, we
6599   // can safely zero-extend. So we initialize IsKnownPositive to True.
6600   bool IsKnownPositive = true;
6601 
6602   // If all the bits of the roots are demanded, we can try a little harder to
6603   // compute a narrower type. This can happen, for example, if the roots are
6604   // getelementptr indices. InstCombine promotes these indices to the pointer
6605   // width. Thus, all their bits are technically demanded even though the
6606   // address computation might be vectorized in a smaller type.
6607   //
6608   // We start by looking at each entry that can be demoted. We compute the
6609   // maximum bit width required to store the scalar by using ValueTracking to
6610   // compute the number of high-order bits we can truncate.
6611   if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
6612       llvm::all_of(TreeRoot, [](Value *R) {
6613         assert(R->hasOneUse() && "Root should have only one use!");
6614         return isa<GetElementPtrInst>(R->user_back());
6615       })) {
6616     MaxBitWidth = 8u;
6617 
6618     // Determine if the sign bit of all the roots is known to be zero. If not,
6619     // IsKnownPositive is set to False.
6620     IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
6621       KnownBits Known = computeKnownBits(R, *DL);
6622       return Known.isNonNegative();
6623     });
6624 
6625     // Determine the maximum number of bits required to store the scalar
6626     // values.
6627     for (auto *Scalar : ToDemote) {
6628       auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
6629       auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
6630       MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
6631     }
6632 
6633     // If we can't prove that the sign bit is zero, we must add one to the
6634     // maximum bit width to account for the unknown sign bit. This preserves
6635     // the existing sign bit so we can safely sign-extend the root back to the
6636     // original type. Otherwise, if we know the sign bit is zero, we will
6637     // zero-extend the root instead.
6638     //
6639     // FIXME: This is somewhat suboptimal, as there will be cases where adding
6640     //        one to the maximum bit width will yield a larger-than-necessary
6641     //        type. In general, we need to add an extra bit only if we can't
6642     //        prove that the upper bit of the original type is equal to the
6643     //        upper bit of the proposed smaller type. If these two bits are the
6644     //        same (either zero or one) we know that sign-extending from the
6645     //        smaller type will result in the same value. Here, since we can't
6646     //        yet prove this, we are just making the proposed smaller type
6647     //        larger to ensure correctness.
6648     if (!IsKnownPositive)
6649       ++MaxBitWidth;
6650   }
6651 
6652   // Round MaxBitWidth up to the next power-of-two.
6653   if (!isPowerOf2_64(MaxBitWidth))
6654     MaxBitWidth = NextPowerOf2(MaxBitWidth);
6655 
  // If the maximum bit width we compute is less than the width of the roots'
  // type, we can proceed with the narrowing. Otherwise, do nothing.
6658   if (MaxBitWidth >= TreeRootIT->getBitWidth())
6659     return;
6660 
6661   // If we can truncate the root, we must collect additional values that might
6662   // be demoted as a result. That is, those seeded by truncations we will
6663   // modify.
6664   while (!Roots.empty())
6665     collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
6666 
  // Finally, map the values we can demote to the maximum bit width we
  // computed.
6668   for (auto *Scalar : ToDemote)
6669     MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
6670 }
6671 
6672 namespace {
6673 
6674 /// The SLPVectorizer Pass.
6675 struct SLPVectorizer : public FunctionPass {
6676   SLPVectorizerPass Impl;
6677 
6678   /// Pass identification, replacement for typeid
6679   static char ID;
6680 
6681   explicit SLPVectorizer() : FunctionPass(ID) {
6682     initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
6683   }
6684 
6685   bool doInitialization(Module &M) override {
6686     return false;
6687   }
6688 
6689   bool runOnFunction(Function &F) override {
6690     if (skipFunction(F))
6691       return false;
6692 
6693     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
6694     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
6695     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
6696     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
6697     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
6698     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
6699     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
6700     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
6701     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
6702     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
6703 
6704     return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
6705   }
6706 
6707   void getAnalysisUsage(AnalysisUsage &AU) const override {
6708     FunctionPass::getAnalysisUsage(AU);
6709     AU.addRequired<AssumptionCacheTracker>();
6710     AU.addRequired<ScalarEvolutionWrapperPass>();
6711     AU.addRequired<AAResultsWrapperPass>();
6712     AU.addRequired<TargetTransformInfoWrapperPass>();
6713     AU.addRequired<LoopInfoWrapperPass>();
6714     AU.addRequired<DominatorTreeWrapperPass>();
6715     AU.addRequired<DemandedBitsWrapperPass>();
6716     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
6717     AU.addRequired<InjectTLIMappingsLegacy>();
6718     AU.addPreserved<LoopInfoWrapperPass>();
6719     AU.addPreserved<DominatorTreeWrapperPass>();
6720     AU.addPreserved<AAResultsWrapperPass>();
6721     AU.addPreserved<GlobalsAAWrapperPass>();
6722     AU.setPreservesCFG();
6723   }
6724 };
6725 
6726 } // end anonymous namespace
6727 
PreservedAnalyses SLPVectorizerPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
6729   auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
6730   auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
6731   auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
6732   auto *AA = &AM.getResult<AAManager>(F);
6733   auto *LI = &AM.getResult<LoopAnalysis>(F);
6734   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
6735   auto *AC = &AM.getResult<AssumptionAnalysis>(F);
6736   auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
6737   auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
6738 
6739   bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
6740   if (!Changed)
6741     return PreservedAnalyses::all();
6742 
6743   PreservedAnalyses PA;
6744   PA.preserveSet<CFGAnalyses>();
6745   return PA;
6746 }
6747 
6748 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
6749                                 TargetTransformInfo *TTI_,
6750                                 TargetLibraryInfo *TLI_, AAResults *AA_,
6751                                 LoopInfo *LI_, DominatorTree *DT_,
6752                                 AssumptionCache *AC_, DemandedBits *DB_,
6753                                 OptimizationRemarkEmitter *ORE_) {
6754   if (!RunSLPVectorization)
6755     return false;
6756   SE = SE_;
6757   TTI = TTI_;
6758   TLI = TLI_;
6759   AA = AA_;
6760   LI = LI_;
6761   DT = DT_;
6762   AC = AC_;
6763   DB = DB_;
6764   DL = &F.getParent()->getDataLayout();
6765 
6766   Stores.clear();
6767   GEPs.clear();
6768   bool Changed = false;
6769 
6770   // If the target claims to have no vector registers don't attempt
6771   // vectorization.
6772   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)))
6773     return false;
6774 
6775   // Don't vectorize when the attribute NoImplicitFloat is used.
6776   if (F.hasFnAttribute(Attribute::NoImplicitFloat))
6777     return false;
6778 
6779   LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
6780 
  // Use the bottom-up SLP vectorizer to construct chains that start with
  // store instructions.
6783   BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);
6784 
6785   // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
6786   // delete instructions.
6787 
6788   // Update DFS numbers now so that we can use them for ordering.
6789   DT->updateDFSNumbers();
6790 
6791   // Scan the blocks in the function in post order.
  for (auto *BB : post_order(&F.getEntryBlock())) {
6793     collectSeedInstructions(BB);
6794 
6795     // Vectorize trees that end at stores.
6796     if (!Stores.empty()) {
6797       LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
6798                         << " underlying objects.\n");
6799       Changed |= vectorizeStoreChains(R);
6800     }
6801 
6802     // Vectorize trees that end at reductions.
6803     Changed |= vectorizeChainsInBlock(BB, R);
6804 
6805     // Vectorize the index computations of getelementptr instructions. This
6806     // is primarily intended to catch gather-like idioms ending at
6807     // non-consecutive loads.
6808     if (!GEPs.empty()) {
6809       LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
6810                         << " underlying objects.\n");
6811       Changed |= vectorizeGEPIndices(BB, R);
6812     }
6813   }
6814 
6815   if (Changed) {
6816     R.optimizeGatherSequence();
6817     LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
6818   }
6819   return Changed;
6820 }
6821 
/// Order may have elements assigned a special value (the size of the order),
/// which is out of bounds. Such indices appear only at positions that
/// correspond to undef values (see canReuseExtract for details) and are used
/// to keep the undef values from affecting the ordering of the operands.
/// The first loop below simply finds all unused indices, and the next loop
/// nest assigns these indices to the positions of the undef values.
6828 /// As an example below Order has two undef positions and they have assigned
6829 /// values 3 and 7 respectively:
6830 /// before:  6 9 5 4 9 2 1 0
6831 /// after:   6 3 5 4 7 2 1 0
6832 /// \returns Fixed ordering.
6833 static BoUpSLP::OrdersType fixupOrderingIndices(ArrayRef<unsigned> Order) {
6834   BoUpSLP::OrdersType NewOrder(Order.begin(), Order.end());
6835   const unsigned Sz = NewOrder.size();
6836   SmallBitVector UsedIndices(Sz);
6837   SmallVector<int> MaskedIndices;
6838   for (int I = 0, E = NewOrder.size(); I < E; ++I) {
6839     if (NewOrder[I] < Sz)
6840       UsedIndices.set(NewOrder[I]);
6841     else
6842       MaskedIndices.push_back(I);
6843   }
6844   if (MaskedIndices.empty())
6845     return NewOrder;
6846   SmallVector<int> AvailableIndices(MaskedIndices.size());
6847   unsigned Cnt = 0;
  int Idx = UsedIndices.find_first_unset();
  do {
    AvailableIndices[Cnt] = Idx;
    Idx = UsedIndices.find_next_unset(Idx);
    ++Cnt;
  } while (Idx > 0);
6854   assert(Cnt == MaskedIndices.size() && "Non-synced masked/available indices.");
6855   for (int I = 0, E = MaskedIndices.size(); I < E; ++I)
6856     NewOrder[MaskedIndices[I]] = AvailableIndices[I];
6857   return NewOrder;
6858 }
6859 
6860 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
6861                                             unsigned Idx) {
6862   LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size()
6863                     << "\n");
6864   const unsigned Sz = R.getVectorElementSize(Chain[0]);
6865   const unsigned MinVF = R.getMinVecRegSize() / Sz;
6866   unsigned VF = Chain.size();
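  // For example (illustrative), with a 128-bit minimum vector register and
  // i32 stores (Sz == 32), MinVF is 4, so chains of fewer than four stores
  // are rejected below.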
6867 
6868   if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF)
6869     return false;
6870 
6871   LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx
6872                     << "\n");
6873 
6874   R.buildTree(Chain);
6875   Optional<ArrayRef<unsigned>> Order = R.bestOrder();
6876   // TODO: Handle orders of size less than number of elements in the vector.
6877   if (Order && Order->size() == Chain.size()) {
6878     // TODO: reorder tree nodes without tree rebuilding.
6879     SmallVector<Value *, 4> ReorderedOps(Chain.size());
6880     transform(fixupOrderingIndices(*Order), ReorderedOps.begin(),
6881               [Chain](const unsigned Idx) { return Chain[Idx]; });
6882     R.buildTree(ReorderedOps);
6883   }
6884   if (R.isTreeTinyAndNotFullyVectorizable())
6885     return false;
6886   if (R.isLoadCombineCandidate())
6887     return false;
6888 
6889   R.computeMinimumValueSizes();
6890 
6891   InstructionCost Cost = R.getTreeCost();
6892 
6893   LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF =" << VF << "\n");
6894   if (Cost < -SLPCostThreshold) {
6895     LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n");
6896 
6897     using namespace ore;
6898 
6899     R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
6900                                         cast<StoreInst>(Chain[0]))
6901                      << "Stores SLP vectorized with cost " << NV("Cost", Cost)
6902                      << " and with tree size "
6903                      << NV("TreeSize", R.getTreeSize()));
6904 
6905     R.vectorizeTree();
6906     return true;
6907   }
6908 
6909   return false;
6910 }
6911 
6912 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
6913                                         BoUpSLP &R) {
6914   // We may run into multiple chains that merge into a single chain. We mark the
6915   // stores that we vectorized so that we don't visit the same store twice.
6916   BoUpSLP::ValueSet VectorizedStores;
6917   bool Changed = false;
6918 
6919   int E = Stores.size();
6920   SmallBitVector Tails(E, false);
6921   int MaxIter = MaxStoreLookup.getValue();
6922   SmallVector<std::pair<int, int>, 16> ConsecutiveChain(
6923       E, std::make_pair(E, INT_MAX));
6924   SmallVector<SmallBitVector, 4> CheckedPairs(E, SmallBitVector(E, false));
6925   int IterCnt;
6926   auto &&FindConsecutiveAccess = [this, &Stores, &Tails, &IterCnt, MaxIter,
6927                                   &CheckedPairs,
6928                                   &ConsecutiveChain](int K, int Idx) {
6929     if (IterCnt >= MaxIter)
6930       return true;
6931     if (CheckedPairs[Idx].test(K))
6932       return ConsecutiveChain[K].second == 1 &&
6933              ConsecutiveChain[K].first == Idx;
6934     ++IterCnt;
6935     CheckedPairs[Idx].set(K);
6936     CheckedPairs[K].set(Idx);
6937     Optional<int> Diff = getPointersDiff(
6938         Stores[K]->getValueOperand()->getType(), Stores[K]->getPointerOperand(),
6939         Stores[Idx]->getValueOperand()->getType(),
6940         Stores[Idx]->getPointerOperand(), *DL, *SE, /*StrictCheck=*/true);
6941     if (!Diff || *Diff == 0)
6942       return false;
6943     int Val = *Diff;
6944     if (Val < 0) {
6945       if (ConsecutiveChain[Idx].second > -Val) {
6946         Tails.set(K);
6947         ConsecutiveChain[Idx] = std::make_pair(K, -Val);
6948       }
6949       return false;
6950     }
6951     if (ConsecutiveChain[K].second <= Val)
6952       return false;
6953 
6954     Tails.set(Idx);
6955     ConsecutiveChain[K] = std::make_pair(Idx, Val);
6956     return Val == 1;
6957   };
6958   // Do a quadratic search on all of the given stores in reverse order and find
6959   // all of the pairs of stores that follow each other.
6960   for (int Idx = E - 1; Idx >= 0; --Idx) {
6961     // If a store has multiple consecutive store candidates, search according
6962     // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ...
    // This is because pairing with the immediately succeeding or preceding
    // candidate usually creates the best chance to find an SLP vectorization
    // opportunity.
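    // For example, with Idx = 5 and E = 8, candidates are visited in the
    // order 4, 6, 3, 7, 2, 1, 0.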
6965     const int MaxLookDepth = std::max(E - Idx, Idx + 1);
6966     IterCnt = 0;
6967     for (int Offset = 1, F = MaxLookDepth; Offset < F; ++Offset)
6968       if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) ||
6969           (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx)))
6970         break;
6971   }
6972 
6973   // Tracks if we tried to vectorize stores starting from the given tail
6974   // already.
6975   SmallBitVector TriedTails(E, false);
6976   // For stores that start but don't end a link in the chain:
6977   for (int Cnt = E; Cnt > 0; --Cnt) {
6978     int I = Cnt - 1;
6979     if (ConsecutiveChain[I].first == E || Tails.test(I))
6980       continue;
6981     // We found a store instr that starts a chain. Now follow the chain and try
6982     // to vectorize it.
6983     BoUpSLP::ValueList Operands;
6984     // Collect the chain into a list.
6985     while (I != E && !VectorizedStores.count(Stores[I])) {
6986       Operands.push_back(Stores[I]);
6987       Tails.set(I);
6988       if (ConsecutiveChain[I].second != 1) {
6989         // Mark the new end in the chain and go back, if required. It might be
6990         // required if the original stores come in reversed order, for example.
6991         if (ConsecutiveChain[I].first != E &&
6992             Tails.test(ConsecutiveChain[I].first) && !TriedTails.test(I) &&
6993             !VectorizedStores.count(Stores[ConsecutiveChain[I].first])) {
6994           TriedTails.set(I);
6995           Tails.reset(ConsecutiveChain[I].first);
6996           if (Cnt < ConsecutiveChain[I].first + 2)
6997             Cnt = ConsecutiveChain[I].first + 2;
6998         }
6999         break;
7000       }
7001       // Move to the next value in the chain.
7002       I = ConsecutiveChain[I].first;
7003     }
7004     assert(!Operands.empty() && "Expected non-empty list of stores.");
7005 
7006     unsigned MaxVecRegSize = R.getMaxVecRegSize();
7007     unsigned EltSize = R.getVectorElementSize(Operands[0]);
7008     unsigned MaxElts = llvm::PowerOf2Floor(MaxVecRegSize / EltSize);
7009 
7010     unsigned MinVF = std::max(2U, R.getMinVecRegSize() / EltSize);
7011     unsigned MaxVF = std::min(R.getMaximumVF(EltSize, Instruction::Store),
7012                               MaxElts);
7013 
7014     // FIXME: Is division-by-2 the correct step? Should we assert that the
7015     // register size is a power-of-2?
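    // For example, with MaxVF = 8 and MinVF = 2, slice sizes 8, 4, and 2 are
    // tried in turn.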
7016     unsigned StartIdx = 0;
7017     for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) {
7018       for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) {
7019         ArrayRef<Value *> Slice = makeArrayRef(Operands).slice(Cnt, Size);
7020         if (!VectorizedStores.count(Slice.front()) &&
7021             !VectorizedStores.count(Slice.back()) &&
7022             vectorizeStoreChain(Slice, R, Cnt)) {
7023           // Mark the vectorized stores so that we don't vectorize them again.
7024           VectorizedStores.insert(Slice.begin(), Slice.end());
7025           Changed = true;
7026           // If we vectorized initial block, no need to try to vectorize it
7027           // again.
7028           if (Cnt == StartIdx)
7029             StartIdx += Size;
7030           Cnt += Size;
7031           continue;
7032         }
7033         ++Cnt;
7034       }
7035       // Check if the whole array was vectorized already - exit.
7036       if (StartIdx >= Operands.size())
7037         break;
7038     }
7039   }
7040 
7041   return Changed;
7042 }
7043 
7044 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
7045   // Initialize the collections. We will make a single pass over the block.
7046   Stores.clear();
7047   GEPs.clear();
7048 
7049   // Visit the store and getelementptr instructions in BB and organize them in
7050   // Stores and GEPs according to the underlying objects of their pointer
7051   // operands.
7052   for (Instruction &I : *BB) {
7053     // Ignore store instructions that are volatile or have a pointer operand
7054     // that doesn't point to a scalar type.
7055     if (auto *SI = dyn_cast<StoreInst>(&I)) {
7056       if (!SI->isSimple())
7057         continue;
7058       if (!isValidElementType(SI->getValueOperand()->getType()))
7059         continue;
7060       Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI);
7061     }
7062 
7063     // Ignore getelementptr instructions that have more than one index, a
7064     // constant index, or a pointer operand that doesn't point to a scalar
7065     // type.
7066     else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
      auto *Idx = GEP->idx_begin()->get();
7068       if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
7069         continue;
7070       if (!isValidElementType(Idx->getType()))
7071         continue;
7072       if (GEP->getType()->isVectorTy())
7073         continue;
7074       GEPs[GEP->getPointerOperand()].push_back(GEP);
7075     }
7076   }
7077 }
7078 
7079 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
7080   if (!A || !B)
7081     return false;
7082   Value *VL[] = {A, B};
7083   return tryToVectorizeList(VL, R, /*AllowReorder=*/true);
7084 }
7085 
7086 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
7087                                            bool AllowReorder) {
7088   if (VL.size() < 2)
7089     return false;
7090 
7091   LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = "
7092                     << VL.size() << ".\n");
7093 
  // Check that all of the parts are instructions of the same type;
  // we permit an alternate opcode via InstructionsState.
7096   InstructionsState S = getSameOpcode(VL);
7097   if (!S.getOpcode())
7098     return false;
7099 
7100   Instruction *I0 = cast<Instruction>(S.OpValue);
7101   // Make sure invalid types (including vector type) are rejected before
7102   // determining vectorization factor for scalar instructions.
7103   for (Value *V : VL) {
7104     Type *Ty = V->getType();
7105     if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) {
      // NOTE: the following will give the user an internal LLVM type name,
      // which may not be useful.
7108       R.getORE()->emit([&]() {
7109         std::string type_str;
7110         llvm::raw_string_ostream rso(type_str);
7111         Ty->print(rso);
7112         return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0)
7113                << "Cannot SLP vectorize list: type "
7114                << rso.str() + " is unsupported by vectorizer";
7115       });
7116       return false;
7117     }
7118   }
7119 
7120   unsigned Sz = R.getVectorElementSize(I0);
7121   unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz);
7122   unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
7123   MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF);
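  // For example (illustrative), for five i32 values with 128-bit registers:
  // Sz = 32, MinVF = 4, and MaxVF = PowerOf2Floor(5) = 4, assuming the
  // target's maximum VF does not cap it lower.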
7124   if (MaxVF < 2) {
7125     R.getORE()->emit([&]() {
7126       return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0)
7127              << "Cannot SLP vectorize list: vectorization factor "
7128              << "less than 2 is not supported";
7129     });
7130     return false;
7131   }
7132 
7133   bool Changed = false;
7134   bool CandidateFound = false;
7135   InstructionCost MinCost = SLPCostThreshold.getValue();
7136   Type *ScalarTy = VL[0]->getType();
7137   if (auto *IE = dyn_cast<InsertElementInst>(VL[0]))
7138     ScalarTy = IE->getOperand(1)->getType();
7139 
7140   unsigned NextInst = 0, MaxInst = VL.size();
7141   for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) {
    // No actual vectorization should happen if the number of parts is the
    // same as the provided vectorization factor (i.e. the scalar type is
    // used for vector code during codegen).
7145     auto *VecTy = FixedVectorType::get(ScalarTy, VF);
7146     if (TTI->getNumberOfParts(VecTy) == VF)
7147       continue;
7148     for (unsigned I = NextInst; I < MaxInst; ++I) {
7149       unsigned OpsWidth = 0;
7150 
7151       if (I + VF > MaxInst)
7152         OpsWidth = MaxInst - I;
7153       else
7154         OpsWidth = VF;
7155 
7156       if (!isPowerOf2_32(OpsWidth))
7157         continue;
7158 
7159       if ((VF > MinVF && OpsWidth <= VF / 2) || (VF == MinVF && OpsWidth < 2))
7160         break;
7161 
7162       ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
7163       // Check that a previous iteration of this loop did not delete the Value.
7164       if (llvm::any_of(Ops, [&R](Value *V) {
7165             auto *I = dyn_cast<Instruction>(V);
7166             return I && R.isDeleted(I);
7167           }))
7168         continue;
7169 
7170       LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
7171                         << "\n");
7172 
7173       R.buildTree(Ops);
7174       if (AllowReorder) {
7175         Optional<ArrayRef<unsigned>> Order = R.bestOrder();
7176         if (Order) {
7177           // TODO: reorder tree nodes without tree rebuilding.
7178           SmallVector<Value *, 4> ReorderedOps(Ops.size());
7179           transform(fixupOrderingIndices(*Order), ReorderedOps.begin(),
7180                     [Ops](const unsigned Idx) { return Ops[Idx]; });
7181           R.buildTree(ReorderedOps);
7182         }
7183       }
7184       if (R.isTreeTinyAndNotFullyVectorizable())
7185         continue;
7186 
7187       R.computeMinimumValueSizes();
7188       InstructionCost Cost = R.getTreeCost();
7189       CandidateFound = true;
7190       MinCost = std::min(MinCost, Cost);
7191 
7192       if (Cost < -SLPCostThreshold) {
7193         LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
7194         R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
7195                                                     cast<Instruction>(Ops[0]))
7196                                  << "SLP vectorized with cost " << ore::NV("Cost", Cost)
7197                                  << " and with tree size "
7198                                  << ore::NV("TreeSize", R.getTreeSize()));
7199 
7200         R.vectorizeTree();
7201         // Move to the next bundle.
7202         I += VF - 1;
7203         NextInst = I + 1;
7204         Changed = true;
7205       }
7206     }
7207   }
7208 
7209   if (!Changed && CandidateFound) {
7210     R.getORE()->emit([&]() {
7211       return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0)
7212              << "List vectorization was possible but not beneficial with cost "
7213              << ore::NV("Cost", MinCost) << " >= "
7214              << ore::NV("Treshold", -SLPCostThreshold);
7215     });
7216   } else if (!Changed) {
7217     R.getORE()->emit([&]() {
7218       return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0)
7219              << "Cannot SLP vectorize list: vectorization was impossible"
7220              << " with available vectorization factors";
7221     });
7222   }
7223   return Changed;
7224 }
7225 
7226 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
7227   if (!I)
7228     return false;
7229 
7230   if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I))
7231     return false;
7232 
  BasicBlock *P = I->getParent();
7234 
7235   // Vectorize in current basic block only.
7236   auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
7237   auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
7238   if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
7239     return false;
7240 
  // First, try to vectorize the two operands of I as a pair.
7242   if (tryToVectorizePair(Op0, Op1, R))
7243     return true;
7244 
7245   auto *A = dyn_cast<BinaryOperator>(Op0);
7246   auto *B = dyn_cast<BinaryOperator>(Op1);
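  // If the pair itself does not vectorize, try pairing one operand with an
  // operand of the other. For example (illustrative), for
  // (a * b) + ((c * d) + e), pairing a * b with c * d may still succeed.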
7247   // Try to skip B.
7248   if (B && B->hasOneUse()) {
7249     auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
7250     auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
7251     if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R))
7252       return true;
7253     if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R))
7254       return true;
7255   }
7256 
7257   // Try to skip A.
7258   if (A && A->hasOneUse()) {
7259     auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
7260     auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
7261     if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R))
7262       return true;
7263     if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R))
7264       return true;
7265   }
7266   return false;
7267 }
7268 
7269 namespace {
7270 
7271 /// Model horizontal reductions.
7272 ///
7273 /// A horizontal reduction is a tree of reduction instructions that has values
7274 /// that can be put into a vector as its leaves. For example:
7275 ///
7276 /// mul mul mul mul
7277 ///  \  /    \  /
7278 ///   +       +
7279 ///    \     /
7280 ///       +
7281 /// This tree has "mul" as its leaf values and "+" as its reduction
7282 /// instructions. A reduction can feed into a store or a binary operation
7283 /// feeding a phi.
7284 ///    ...
7285 ///    \  /
7286 ///     +
7287 ///     |
7288 ///  phi +=
7289 ///
7290 ///  Or:
7291 ///    ...
7292 ///    \  /
7293 ///     +
7294 ///     |
7295 ///   *p =
7296 ///
7297 class HorizontalReduction {
7298   using ReductionOpsType = SmallVector<Value *, 16>;
7299   using ReductionOpsListType = SmallVector<ReductionOpsType, 2>;
7300   ReductionOpsListType ReductionOps;
7301   SmallVector<Value *, 32> ReducedVals;
  // Use a MapVector to make the output deterministic.
7303   MapVector<Instruction *, Value *> ExtraArgs;
7304   WeakTrackingVH ReductionRoot;
7305   /// The type of reduction operation.
7306   RecurKind RdxKind;
7307 
7308   const unsigned INVALID_OPERAND_INDEX = std::numeric_limits<unsigned>::max();
7309 
7310   static bool isCmpSelMinMax(Instruction *I) {
7311     return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) &&
7312            RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I));
7313   }
7314 
7315   // And/or are potentially poison-safe logical patterns like:
7316   // select x, y, false
7317   // select x, true, y
7318   static bool isBoolLogicOp(Instruction *I) {
7319     return match(I, m_LogicalAnd(m_Value(), m_Value())) ||
7320            match(I, m_LogicalOr(m_Value(), m_Value()));
7321   }
7322 
7323   /// Checks if instruction is associative and can be vectorized.
7324   static bool isVectorizable(RecurKind Kind, Instruction *I) {
7325     if (Kind == RecurKind::None)
7326       return false;
7327 
7328     // Integer ops that map to select instructions or intrinsics are fine.
7329     if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) ||
7330         isBoolLogicOp(I))
7331       return true;
7332 
7333     if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) {
7334       // FP min/max are associative except for NaN and -0.0. We do not
7335       // have to rule out -0.0 here because the intrinsic semantics do not
7336       // specify a fixed result for it.
7337       return I->getFastMathFlags().noNaNs();
7338     }
7339 
7340     return I->isAssociative();
7341   }
7342 
7343   static Value *getRdxOperand(Instruction *I, unsigned Index) {
7344     // Poison-safe 'or' takes the form: select X, true, Y
7345     // To make that work with the normal operand processing, we skip the
7346     // true value operand.
7347     // TODO: Change the code and data structures to handle this without a hack.
7348     if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1)
7349       return I->getOperand(2);
7350     return I->getOperand(Index);
7351   }
7352 
  /// Checks if the ParentStackElem.first should be marked as a reduction
  /// operation with an extra argument or as an extra argument itself.
7355   void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem,
7356                     Value *ExtraArg) {
7357     if (ExtraArgs.count(ParentStackElem.first)) {
7358       ExtraArgs[ParentStackElem.first] = nullptr;
7359       // We ran into something like:
7360       // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg.
7361       // The whole ParentStackElem.first should be considered as an extra value
7362       // in this case.
      // Do not perform analysis of the remaining operands of the
      // ParentStackElem.first instruction; this whole instruction is an extra
      // argument.
7365       ParentStackElem.second = INVALID_OPERAND_INDEX;
7366     } else {
7367       // We ran into something like:
7368       // ParentStackElem.first += ... + ExtraArg + ...
7369       ExtraArgs[ParentStackElem.first] = ExtraArg;
7370     }
7371   }
7372 
7373   /// Creates reduction operation with the current opcode.
7374   static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS,
7375                          Value *RHS, const Twine &Name, bool UseSelect) {
7376     unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind);
7377     switch (Kind) {
7378     case RecurKind::Add:
7379     case RecurKind::Mul:
7380     case RecurKind::Or:
7381     case RecurKind::And:
7382     case RecurKind::Xor:
7383     case RecurKind::FAdd:
7384     case RecurKind::FMul:
7385       return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
7386                                  Name);
7387     case RecurKind::FMax:
7388       return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS);
7389     case RecurKind::FMin:
7390       return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS);
7391     case RecurKind::SMax:
7392       if (UseSelect) {
7393         Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name);
7394         return Builder.CreateSelect(Cmp, LHS, RHS, Name);
7395       }
7396       return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS);
7397     case RecurKind::SMin:
7398       if (UseSelect) {
7399         Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name);
7400         return Builder.CreateSelect(Cmp, LHS, RHS, Name);
7401       }
7402       return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS);
7403     case RecurKind::UMax:
7404       if (UseSelect) {
7405         Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name);
7406         return Builder.CreateSelect(Cmp, LHS, RHS, Name);
7407       }
7408       return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS);
7409     case RecurKind::UMin:
7410       if (UseSelect) {
7411         Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name);
7412         return Builder.CreateSelect(Cmp, LHS, RHS, Name);
7413       }
7414       return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS);
7415     default:
7416       llvm_unreachable("Unknown reduction operation.");
7417     }
7418   }
7419 
7420   /// Creates reduction operation with the current opcode with the IR flags
7421   /// from \p ReductionOps.
7422   static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS,
7423                          Value *RHS, const Twine &Name,
7424                          const ReductionOpsListType &ReductionOps) {
7425     bool UseSelect = ReductionOps.size() == 2;
7426     assert((!UseSelect || isa<SelectInst>(ReductionOps[1][0])) &&
7427            "Expected cmp + select pairs for reduction");
7428     Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect);
7429     if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) {
7430       if (auto *Sel = dyn_cast<SelectInst>(Op)) {
7431         propagateIRFlags(Sel->getCondition(), ReductionOps[0]);
7432         propagateIRFlags(Op, ReductionOps[1]);
7433         return Op;
7434       }
7435     }
7436     propagateIRFlags(Op, ReductionOps[0]);
7437     return Op;
7438   }
7439 
7440   /// Creates reduction operation with the current opcode with the IR flags
7441   /// from \p I.
7442   static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS,
7443                          Value *RHS, const Twine &Name, Instruction *I) {
7444     auto *SelI = dyn_cast<SelectInst>(I);
7445     Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, SelI != nullptr);
7446     if (SelI && RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) {
7447       if (auto *Sel = dyn_cast<SelectInst>(Op))
7448         propagateIRFlags(Sel->getCondition(), SelI->getCondition());
7449     }
7450     propagateIRFlags(Op, I);
7451     return Op;
7452   }
7453 
7454   static RecurKind getRdxKind(Instruction *I) {
7455     assert(I && "Expected instruction for reduction matching");
7457     if (match(I, m_Add(m_Value(), m_Value())))
7458       return RecurKind::Add;
7459     if (match(I, m_Mul(m_Value(), m_Value())))
7460       return RecurKind::Mul;
7461     if (match(I, m_And(m_Value(), m_Value())) ||
7462         match(I, m_LogicalAnd(m_Value(), m_Value())))
7463       return RecurKind::And;
7464     if (match(I, m_Or(m_Value(), m_Value())) ||
7465         match(I, m_LogicalOr(m_Value(), m_Value())))
7466       return RecurKind::Or;
7467     if (match(I, m_Xor(m_Value(), m_Value())))
7468       return RecurKind::Xor;
7469     if (match(I, m_FAdd(m_Value(), m_Value())))
7470       return RecurKind::FAdd;
7471     if (match(I, m_FMul(m_Value(), m_Value())))
7472       return RecurKind::FMul;
7473 
7474     if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value())))
7475       return RecurKind::FMax;
7476     if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
7477       return RecurKind::FMin;
7478 
7479     // This matches either cmp+select or intrinsics. SLP is expected to handle
7480     // either form.
7481     // TODO: If we are canonicalizing to intrinsics, we can remove several
7482     //       special-case paths that deal with selects.
7483     if (match(I, m_SMax(m_Value(), m_Value())))
7484       return RecurKind::SMax;
7485     if (match(I, m_SMin(m_Value(), m_Value())))
7486       return RecurKind::SMin;
7487     if (match(I, m_UMax(m_Value(), m_Value())))
7488       return RecurKind::UMax;
7489     if (match(I, m_UMin(m_Value(), m_Value())))
7490       return RecurKind::UMin;
7491 
7492     if (auto *Select = dyn_cast<SelectInst>(I)) {
      // Try harder: look for a min/max pattern based on instructions
      // producing the same values, such as:
      // select ((cmp Inst1, Inst2), Inst1, Inst2).
7495       // During the intermediate stages of SLP, it's very common to have
7496       // pattern like this (since optimizeGatherSequence is run only once
7497       // at the end):
7498       // %1 = extractelement <2 x i32> %a, i32 0
7499       // %2 = extractelement <2 x i32> %a, i32 1
7500       // %cond = icmp sgt i32 %1, %2
7501       // %3 = extractelement <2 x i32> %a, i32 0
7502       // %4 = extractelement <2 x i32> %a, i32 1
7503       // %select = select i1 %cond, i32 %3, i32 %4
7504       CmpInst::Predicate Pred;
7505       Instruction *L1;
7506       Instruction *L2;
7507 
7508       Value *LHS = Select->getTrueValue();
7509       Value *RHS = Select->getFalseValue();
7510       Value *Cond = Select->getCondition();
7511 
7512       // TODO: Support inverse predicates.
7513       if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) {
7514         if (!isa<ExtractElementInst>(RHS) ||
7515             !L2->isIdenticalTo(cast<Instruction>(RHS)))
7516           return RecurKind::None;
7517       } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) {
7518         if (!isa<ExtractElementInst>(LHS) ||
7519             !L1->isIdenticalTo(cast<Instruction>(LHS)))
7520           return RecurKind::None;
7521       } else {
7522         if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS))
7523           return RecurKind::None;
7524         if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) ||
7525             !L1->isIdenticalTo(cast<Instruction>(LHS)) ||
7526             !L2->isIdenticalTo(cast<Instruction>(RHS)))
7527           return RecurKind::None;
7528       }
7529 
7531       switch (Pred) {
7532       default:
7533         return RecurKind::None;
7534       case CmpInst::ICMP_SGT:
7535       case CmpInst::ICMP_SGE:
7536         return RecurKind::SMax;
7537       case CmpInst::ICMP_SLT:
7538       case CmpInst::ICMP_SLE:
7539         return RecurKind::SMin;
7540       case CmpInst::ICMP_UGT:
7541       case CmpInst::ICMP_UGE:
7542         return RecurKind::UMax;
7543       case CmpInst::ICMP_ULT:
7544       case CmpInst::ICMP_ULE:
7545         return RecurKind::UMin;
7546       }
7547     }
7548     return RecurKind::None;
7549   }
7550 
7551   /// Get the index of the first operand.
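  /// For a cmp+sel min/max, operand 0 of the select is the condition, so the
  /// reduced operands start at index 1.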
7552   static unsigned getFirstOperandIndex(Instruction *I) {
7553     return isCmpSelMinMax(I) ? 1 : 0;
7554   }
7555 
7556   /// Total number of operands in the reduction operation.
7557   static unsigned getNumberOfOperands(Instruction *I) {
7558     return isCmpSelMinMax(I) ? 3 : 2;
7559   }
7560 
7561   /// Checks if the instruction is in basic block \p BB.
  /// For a cmp+sel min/max reduction, check that both ops are in \p BB.
7563   static bool hasSameParent(Instruction *I, BasicBlock *BB) {
7564     if (isCmpSelMinMax(I)) {
7565       auto *Sel = cast<SelectInst>(I);
7566       auto *Cmp = cast<Instruction>(Sel->getCondition());
7567       return Sel->getParent() == BB && Cmp->getParent() == BB;
7568     }
7569     return I->getParent() == BB;
7570   }
7571 
7572   /// Expected number of uses for reduction operations/reduced values.
7573   static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) {
7574     if (IsCmpSelMinMax) {
      // The SelectInst must be used twice, while the condition op must have
      // a single use only.
7577       if (auto *Sel = dyn_cast<SelectInst>(I))
7578         return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse();
7579       return I->hasNUses(2);
7580     }
7581 
7582     // Arithmetic reduction operation must be used once only.
7583     return I->hasOneUse();
7584   }
7585 
7586   /// Initializes the list of reduction operations.
7587   void initReductionOps(Instruction *I) {
7588     if (isCmpSelMinMax(I))
7589       ReductionOps.assign(2, ReductionOpsType());
7590     else
7591       ReductionOps.assign(1, ReductionOpsType());
7592   }
7593 
7594   /// Add all reduction operations for the reduction instruction \p I.
7595   void addReductionOps(Instruction *I) {
7596     if (isCmpSelMinMax(I)) {
7597       ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition());
7598       ReductionOps[1].emplace_back(I);
7599     } else {
7600       ReductionOps[0].emplace_back(I);
7601     }
7602   }
7603 
7604   static Value *getLHS(RecurKind Kind, Instruction *I) {
7605     if (Kind == RecurKind::None)
7606       return nullptr;
7607     return I->getOperand(getFirstOperandIndex(I));
7608   }
7609   static Value *getRHS(RecurKind Kind, Instruction *I) {
7610     if (Kind == RecurKind::None)
7611       return nullptr;
7612     return I->getOperand(getFirstOperandIndex(I) + 1);
7613   }
7614 
7615 public:
7616   HorizontalReduction() = default;
7617 
7618   /// Try to find a reduction tree.
7619   bool matchAssociativeReduction(PHINode *Phi, Instruction *Inst) {
7620     assert((!Phi || is_contained(Phi->operands(), Inst)) &&
7621            "Phi needs to use the binary operator");
7622     assert((isa<BinaryOperator>(Inst) || isa<SelectInst>(Inst) ||
7623             isa<IntrinsicInst>(Inst)) &&
7624            "Expected binop, select, or intrinsic for reduction matching");
7625     RdxKind = getRdxKind(Inst);
7626 
    // We could have an initial reduction that is not an add.
7628     //  r *= v1 + v2 + v3 + v4
7629     // In such a case start looking for a tree rooted in the first '+'.
7630     if (Phi) {
7631       if (getLHS(RdxKind, Inst) == Phi) {
7632         Phi = nullptr;
7633         Inst = dyn_cast<Instruction>(getRHS(RdxKind, Inst));
7634         if (!Inst)
7635           return false;
7636         RdxKind = getRdxKind(Inst);
7637       } else if (getRHS(RdxKind, Inst) == Phi) {
7638         Phi = nullptr;
7639         Inst = dyn_cast<Instruction>(getLHS(RdxKind, Inst));
7640         if (!Inst)
7641           return false;
7642         RdxKind = getRdxKind(Inst);
7643       }
7644     }
7645 
7646     if (!isVectorizable(RdxKind, Inst))
7647       return false;
7648 
7649     // Analyze "regular" integer/FP types for reductions - no target-specific
7650     // types or pointers.
7651     Type *Ty = Inst->getType();
7652     if (!isValidElementType(Ty) || Ty->isPointerTy())
7653       return false;
7654 
    // Though the ultimate reduction may have multiple uses, its condition
    // must have only a single use.
7657     if (auto *Sel = dyn_cast<SelectInst>(Inst))
7658       if (!Sel->getCondition()->hasOneUse())
7659         return false;
7660 
7661     ReductionRoot = Inst;
7662 
7663     // The opcode for leaf values that we perform a reduction on.
7664     // For example: load(x) + load(y) + load(z) + fptoui(w)
7665     // The leaf opcode for 'w' does not match, so we don't include it as a
7666     // potential candidate for the reduction.
7667     unsigned LeafOpcode = 0;
7668 
7669     // Post-order traverse the reduction tree starting at Inst. We only handle
7670     // true trees containing binary operators or selects.
7671     SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
7672     Stack.push_back(std::make_pair(Inst, getFirstOperandIndex(Inst)));
7673     initReductionOps(Inst);
7674     while (!Stack.empty()) {
7675       Instruction *TreeN = Stack.back().first;
7676       unsigned EdgeToVisit = Stack.back().second++;
7677       const RecurKind TreeRdxKind = getRdxKind(TreeN);
7678       bool IsReducedValue = TreeRdxKind != RdxKind;
7679 
7680       // Postorder visit.
7681       if (IsReducedValue || EdgeToVisit >= getNumberOfOperands(TreeN)) {
7682         if (IsReducedValue)
7683           ReducedVals.push_back(TreeN);
7684         else {
7685           auto ExtraArgsIter = ExtraArgs.find(TreeN);
7686           if (ExtraArgsIter != ExtraArgs.end() && !ExtraArgsIter->second) {
7687             // Check if TreeN is an extra argument of its parent operation.
7688             if (Stack.size() <= 1) {
7689               // TreeN can't be an extra argument as it is a root reduction
7690               // operation.
7691               return false;
7692             }
7693             // Yes, TreeN is an extra argument, do not add it to a list of
7694             // reduction operations.
7695             // Stack[Stack.size() - 2] always points to the parent operation.
7696             markExtraArg(Stack[Stack.size() - 2], TreeN);
7697             ExtraArgs.erase(TreeN);
7698           } else
7699             addReductionOps(TreeN);
7700         }
7701         // Retract.
7702         Stack.pop_back();
7703         continue;
7704       }
7705 
7706       // Visit operands.
7707       Value *EdgeVal = getRdxOperand(TreeN, EdgeToVisit);
7708       auto *EdgeInst = dyn_cast<Instruction>(EdgeVal);
7709       if (!EdgeInst) {
7710         // Edge value is not a reduction instruction or a leaf instruction.
7711         // (It may be a constant, function argument, or something else.)
7712         markExtraArg(Stack.back(), EdgeVal);
7713         continue;
7714       }
7715       RecurKind EdgeRdxKind = getRdxKind(EdgeInst);
      // Continue analysis if the next operand is a reduction operation or
      // (possibly) a leaf value. If the leaf value opcode is not set yet, the
      // first operation we meet that is not a reduction operation defines the
      // leaf opcode.
      // Only handle trees in the current basic block.
      // Each tree node needs to have the minimal number of users, except for
      // the ultimate reduction.
7723       const bool IsRdxInst = EdgeRdxKind == RdxKind;
7724       if (EdgeInst != Phi && EdgeInst != Inst &&
7725           hasSameParent(EdgeInst, Inst->getParent()) &&
7726           hasRequiredNumberOfUses(isCmpSelMinMax(Inst), EdgeInst) &&
7727           (!LeafOpcode || LeafOpcode == EdgeInst->getOpcode() || IsRdxInst)) {
7728         if (IsRdxInst) {
7729           // We need to be able to reassociate the reduction operations.
7730           if (!isVectorizable(EdgeRdxKind, EdgeInst)) {
7731             // I is an extra argument for TreeN (its parent operation).
7732             markExtraArg(Stack.back(), EdgeInst);
7733             continue;
7734           }
7735         } else if (!LeafOpcode) {
7736           LeafOpcode = EdgeInst->getOpcode();
7737         }
7738         Stack.push_back(
7739             std::make_pair(EdgeInst, getFirstOperandIndex(EdgeInst)));
7740         continue;
7741       }
7742       // I is an extra argument for TreeN (its parent operation).
7743       markExtraArg(Stack.back(), EdgeInst);
7744     }
7745     return true;
7746   }
7747 
7748   /// Attempt to vectorize the tree found by matchAssociativeReduction.
7749   bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
7750     // If there are a sufficient number of reduction values, reduce
7751     // to a nearby power-of-2. We can safely generate oversized
7752     // vectors and rely on the backend to split them to legal sizes.
7753     unsigned NumReducedVals = ReducedVals.size();
7754     if (NumReducedVals < 4)
7755       return false;
7756 
7757     // Intersect the fast-math-flags from all reduction operations.
7758     FastMathFlags RdxFMF;
7759     RdxFMF.set();
7760     for (ReductionOpsType &RdxOp : ReductionOps) {
7761       for (Value *RdxVal : RdxOp) {
7762         if (auto *FPMO = dyn_cast<FPMathOperator>(RdxVal))
7763           RdxFMF &= FPMO->getFastMathFlags();
7764       }
7765     }
7766 
7767     IRBuilder<> Builder(cast<Instruction>(ReductionRoot));
7768     Builder.setFastMathFlags(RdxFMF);
7769 
7770     BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
7771     // The same extra argument may be used several times, so log each attempt
7772     // to use it.
7773     for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) {
7774       assert(Pair.first && "DebugLoc must be set.");
7775       ExternallyUsedValues[Pair.second].push_back(Pair.first);
7776     }
7777 
7778     // The compare instruction of a min/max is the insertion point for new
7779     // instructions and may be replaced with a new compare instruction.
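    // For example, in a signed-max reduction step such as:
    //   %cmp = icmp sgt i32 %a, %b
    //   %max = select i1 %cmp, i32 %a, i32 %b
    // the '%cmp' instruction is the insertion point.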
7780     auto getCmpForMinMaxReduction = [](Instruction *RdxRootInst) {
7781       assert(isa<SelectInst>(RdxRootInst) &&
7782              "Expected min/max reduction to have select root instruction");
7783       Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition();
7784       assert(isa<Instruction>(ScalarCond) &&
7785              "Expected min/max reduction to have compare condition");
7786       return cast<Instruction>(ScalarCond);
7787     };
7788 
7789     // The reduction root is used as the insertion point for new instructions,
7790     // so set it as externally used to prevent it from being deleted.
7791     ExternallyUsedValues[ReductionRoot];
7792     SmallVector<Value *, 16> IgnoreList;
7793     for (ReductionOpsType &RdxOp : ReductionOps)
7794       IgnoreList.append(RdxOp.begin(), RdxOp.end());
7795 
7796     unsigned ReduxWidth = PowerOf2Floor(NumReducedVals);
7797     if (NumReducedVals > ReduxWidth) {
7798       // In the loop below, we are building a tree based on a window of
7799       // 'ReduxWidth' values.
7800       // If the operands of those values have common traits (compare predicate,
7801       // constant operand, etc), then we want to group those together to
7802       // minimize the cost of the reduction.
7803 
7804       // TODO: This should be extended to count common operands for
7805       //       compares and binops.
7806 
7807       // Step 1: Count the number of times each compare predicate occurs.
7808       SmallDenseMap<unsigned, unsigned> PredCountMap;
7809       for (Value *RdxVal : ReducedVals) {
7810         CmpInst::Predicate Pred;
7811         if (match(RdxVal, m_Cmp(Pred, m_Value(), m_Value())))
7812           ++PredCountMap[Pred];
7813       }
7814       // Step 2: Sort the values so the most common predicates come first.
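      // For example, given the reduced values
      //   icmp ult ..., icmp sgt ..., icmp ult ..., icmp ult ...
      // the 'ult' compares are grouped first so that a single vectorized
      // compare covers as many of them as possible.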
7815       stable_sort(ReducedVals, [&PredCountMap](Value *A, Value *B) {
7816         CmpInst::Predicate PredA, PredB;
7817         if (match(A, m_Cmp(PredA, m_Value(), m_Value())) &&
7818             match(B, m_Cmp(PredB, m_Value(), m_Value()))) {
7819           return PredCountMap[PredA] > PredCountMap[PredB];
7820         }
7821         return false;
7822       });
7823     }
7824 
7825     Value *VectorizedTree = nullptr;
7826     unsigned i = 0;
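    // For example, with 7 reduced values the first window vectorizes 4 of
    // them; the next window would only be PowerOf2Floor(3) == 2 wide, so the
    // loop stops and the remaining values are folded in scalar form below.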
7827     while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) {
7828       ArrayRef<Value *> VL(&ReducedVals[i], ReduxWidth);
7829       V.buildTree(VL, ExternallyUsedValues, IgnoreList);
7830       Optional<ArrayRef<unsigned>> Order = V.bestOrder();
7831       if (Order) {
7832         assert(Order->size() == VL.size() &&
7833                "Order size must be the same as number of vectorized "
7834                "instructions.");
7835         // TODO: reorder tree nodes without tree rebuilding.
7836         SmallVector<Value *, 4> ReorderedOps(VL.size());
7837         transform(fixupOrderingIndices(*Order), ReorderedOps.begin(),
7838                   [VL](const unsigned Idx) { return VL[Idx]; });
7839         V.buildTree(ReorderedOps, ExternallyUsedValues, IgnoreList);
7840       }
7841       if (V.isTreeTinyAndNotFullyVectorizable())
7842         break;
7843       if (V.isLoadCombineReductionCandidate(RdxKind))
7844         break;
7845 
7846       // For a poison-safe boolean logic reduction, do not replace select
7847       // instructions with logic ops. All reduced values will be frozen (see
7848       // below) to prevent leaking poison.
7849       if (isa<SelectInst>(ReductionRoot) &&
7850           isBoolLogicOp(cast<Instruction>(ReductionRoot)) &&
7851           NumReducedVals != ReduxWidth)
7852         break;
7853 
7854       V.computeMinimumValueSizes();
7855 
7856       // Estimate cost.
7857       InstructionCost TreeCost =
7858           V.getTreeCost(makeArrayRef(&ReducedVals[i], ReduxWidth));
7859       InstructionCost ReductionCost =
7860           getReductionCost(TTI, ReducedVals[i], ReduxWidth, RdxFMF);
7861       InstructionCost Cost = TreeCost + ReductionCost;
7862       if (!Cost.isValid()) {
7863         LLVM_DEBUG(dbgs() << "Encountered invalid baseline cost.\n");
7864         return false;
7865       }
7866       if (Cost >= -SLPCostThreshold) {
7867         V.getORE()->emit([&]() {
7868           return OptimizationRemarkMissed(SV_NAME, "HorSLPNotBeneficial",
7869                                           cast<Instruction>(VL[0]))
7870                  << "Vectorizing horizontal reduction is possible"
7871                  << "but not beneficial with cost " << ore::NV("Cost", Cost)
7872                  << " and threshold "
7873                  << ore::NV("Threshold", -SLPCostThreshold);
7874         });
7875         break;
7876       }
7877 
7878       LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
7879                         << Cost << ". (HorRdx)\n");
7880       V.getORE()->emit([&]() {
7881         return OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction",
7882                                   cast<Instruction>(VL[0]))
7883                << "Vectorized horizontal reduction with cost "
7884                << ore::NV("Cost", Cost) << " and with tree size "
7885                << ore::NV("TreeSize", V.getTreeSize());
7886       });
7887 
7888       // Vectorize a tree.
7889       DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
7890       Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);
7891 
7892       // Emit a reduction. If the root is a select (min/max idiom), the insert
7893       // point is the compare condition of that select.
7894       Instruction *RdxRootInst = cast<Instruction>(ReductionRoot);
7895       if (isCmpSelMinMax(RdxRootInst))
7896         Builder.SetInsertPoint(getCmpForMinMaxReduction(RdxRootInst));
7897       else
7898         Builder.SetInsertPoint(RdxRootInst);
7899 
7900       // To prevent poison from leaking across what used to be sequential, safe,
7901       // scalar boolean logic operations, the reduction operand must be frozen.
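      // For example, 'select i1 %a, i1 %b, i1 false' is a poison-safe 'and';
      // the vectorized reduction combines the lanes with a plain 'and', so
      // the vector operand is frozen first to stop poison from propagating.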
7902       if (isa<SelectInst>(RdxRootInst) && isBoolLogicOp(RdxRootInst))
7903         VectorizedRoot = Builder.CreateFreeze(VectorizedRoot);
7904 
7905       Value *ReducedSubTree =
7906           emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);
7907 
7908       if (!VectorizedTree) {
7909         // Initialize the final value in the reduction.
7910         VectorizedTree = ReducedSubTree;
7911       } else {
7912         // Update the final value in the reduction.
7913         Builder.SetCurrentDebugLocation(Loc);
7914         VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
7915                                   ReducedSubTree, "op.rdx", ReductionOps);
7916       }
7917       i += ReduxWidth;
7918       ReduxWidth = PowerOf2Floor(NumReducedVals - i);
7919     }
7920 
7921     if (VectorizedTree) {
7922       // Finish the reduction.
7923       for (; i < NumReducedVals; ++i) {
7924         auto *I = cast<Instruction>(ReducedVals[i]);
7925         Builder.SetCurrentDebugLocation(I->getDebugLoc());
7926         VectorizedTree =
7927             createOp(Builder, RdxKind, VectorizedTree, I, "", ReductionOps);
7928       }
7929       for (auto &Pair : ExternallyUsedValues) {
7930         // Add each externally used value to the final reduction.
7931         for (auto *I : Pair.second) {
7932           Builder.SetCurrentDebugLocation(I->getDebugLoc());
7933           VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
7934                                     Pair.first, "op.extra", I);
7935         }
7936       }
7937 
7938       ReductionRoot->replaceAllUsesWith(VectorizedTree);
7939 
7940       // Mark all scalar reduction ops for deletion, they are replaced by the
7941       // vector reductions.
7942       V.eraseInstructions(IgnoreList);
7943     }
7944     return VectorizedTree != nullptr;
7945   }
7946 
7947   unsigned numReductionValues() const { return ReducedVals.size(); }
7948 
7949 private:
7950   /// Calculate the cost of a reduction.
7951   InstructionCost getReductionCost(TargetTransformInfo *TTI,
7952                                    Value *FirstReducedVal, unsigned ReduxWidth,
7953                                    FastMathFlags FMF) {
7954     Type *ScalarTy = FirstReducedVal->getType();
7955     FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth);
7956     InstructionCost VectorCost, ScalarCost;
7957     switch (RdxKind) {
7958     case RecurKind::Add:
7959     case RecurKind::Mul:
7960     case RecurKind::Or:
7961     case RecurKind::And:
7962     case RecurKind::Xor:
7963     case RecurKind::FAdd:
7964     case RecurKind::FMul: {
7965       unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind);
7966       VectorCost = TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF);
7967       ScalarCost = TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy);
7968       break;
7969     }
7970     case RecurKind::FMax:
7971     case RecurKind::FMin: {
7972       auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy));
7973       VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy,
7974                                                /*unsigned=*/false);
7975       ScalarCost =
7976           TTI->getCmpSelInstrCost(Instruction::FCmp, ScalarTy) +
7977           TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
7978                                   CmpInst::makeCmpResultType(ScalarTy));
7979       break;
7980     }
7981     case RecurKind::SMax:
7982     case RecurKind::SMin:
7983     case RecurKind::UMax:
7984     case RecurKind::UMin: {
7985       auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy));
7986       bool IsUnsigned =
7987           RdxKind == RecurKind::UMax || RdxKind == RecurKind::UMin;
7988       VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy, IsUnsigned);
7989       ScalarCost =
7990           TTI->getCmpSelInstrCost(Instruction::ICmp, ScalarTy) +
7991           TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
7992                                   CmpInst::makeCmpResultType(ScalarTy));
7993       break;
7994     }
7995     default:
7996       llvm_unreachable("Expected arithmetic or min/max reduction operation");
7997     }
7998 
7999     // Scalar cost is repeated for N-1 elements.
8000     ScalarCost *= (ReduxWidth - 1);
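    // The returned (signed) cost is the vector reduction cost minus the cost
    // of the scalar operation chain it replaces; a negative value means the
    // vector form is expected to be cheaper.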
8001     LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost
8002                       << " for reduction that starts with " << *FirstReducedVal
8003                       << " (It is a splitting reduction)\n");
8004     return VectorCost - ScalarCost;
8005   }
8006 
8007   /// Emit a horizontal reduction of the vectorized value.
8008   Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder,
8009                        unsigned ReduxWidth, const TargetTransformInfo *TTI) {
8010     assert(VectorizedValue && "Need to have a vectorized tree node");
8011     assert(isPowerOf2_32(ReduxWidth) &&
8012            "We only handle power-of-two reductions for now");
8013 
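    // For a width-4 integer add, for example, this emits a single
    //   call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v)
    // which the target then lowers to shuffles and adds as needed.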
8014     return createSimpleTargetReduction(Builder, TTI, VectorizedValue, RdxKind,
8015                                        ReductionOps.back());
8016   }
8017 };
8018 
8019 } // end anonymous namespace
8020 
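/// Compute the total number of scalar elements in the homogeneous aggregate
/// built by \p InsertInst, or None if the aggregate is not homogeneous or
/// cannot be mapped to a vector. For example, {<2 x float>, <2 x float>} and
/// [2 x {float, float}] both flatten to 4 elements.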
8021 static Optional<unsigned> getAggregateSize(Instruction *InsertInst) {
8022   if (auto *IE = dyn_cast<InsertElementInst>(InsertInst))
8023     return cast<FixedVectorType>(IE->getType())->getNumElements();
8024 
8025   unsigned AggregateSize = 1;
8026   auto *IV = cast<InsertValueInst>(InsertInst);
8027   Type *CurrentType = IV->getType();
8028   do {
8029     if (auto *ST = dyn_cast<StructType>(CurrentType)) {
8030       for (auto *Elt : ST->elements())
8031         if (Elt != ST->getElementType(0)) // check homogeneity
8032           return None;
8033       AggregateSize *= ST->getNumElements();
8034       CurrentType = ST->getElementType(0);
8035     } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
8036       AggregateSize *= AT->getNumElements();
8037       CurrentType = AT->getElementType();
8038     } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) {
8039       AggregateSize *= VT->getNumElements();
8040       return AggregateSize;
8041     } else if (CurrentType->isSingleValueType()) {
8042       return AggregateSize;
8043     } else {
8044       return None;
8045     }
8046   } while (true);
8047 }
8048 
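/// Walk the chain of insertelement/insertvalue instructions feeding
/// \p LastInsertInst, recording each inserted scalar operand at its flattened
/// aggregate index in \p BuildVectorOpds and the corresponding insert
/// instruction in \p InsertElts. \p OperandOffset is the flattened index
/// offset of the current (sub-)aggregate within the outermost aggregate.
/// \returns false if any insert index cannot be determined.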
8049 static bool findBuildAggregate_rec(Instruction *LastInsertInst,
8050                                    TargetTransformInfo *TTI,
8051                                    SmallVectorImpl<Value *> &BuildVectorOpds,
8052                                    SmallVectorImpl<Value *> &InsertElts,
8053                                    unsigned OperandOffset) {
8054   do {
8055     Value *InsertedOperand = LastInsertInst->getOperand(1);
8056     Optional<int> OperandIndex = getInsertIndex(LastInsertInst, OperandOffset);
8057     if (!OperandIndex)
8058       return false;
8059     if (isa<InsertElementInst>(InsertedOperand) ||
8060         isa<InsertValueInst>(InsertedOperand)) {
8061       if (!findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI,
8062                                   BuildVectorOpds, InsertElts, *OperandIndex))
8063         return false;
8064     } else {
8065       BuildVectorOpds[*OperandIndex] = InsertedOperand;
8066       InsertElts[*OperandIndex] = LastInsertInst;
8067     }
8068     LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0));
8069   } while (LastInsertInst != nullptr &&
8070            (isa<InsertValueInst>(LastInsertInst) ||
8071             isa<InsertElementInst>(LastInsertInst)) &&
8072            LastInsertInst->hasOneUse());
8073   return true;
8074 }
8075 
8076 /// Recognize construction of vectors like
8077 ///  %ra = insertelement <4 x float> poison, float %s0, i32 0
8078 ///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
8079 ///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
8080 ///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
8081 ///  starting from the last insertelement or insertvalue instruction.
8082 ///
8083 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>},
8084 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on.
8085 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples.
8086 ///
8087 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type.
8088 ///
8089 /// \return true if it matches.
8090 static bool findBuildAggregate(Instruction *LastInsertInst,
8091                                TargetTransformInfo *TTI,
8092                                SmallVectorImpl<Value *> &BuildVectorOpds,
8093                                SmallVectorImpl<Value *> &InsertElts) {
8094 
8095   assert((isa<InsertElementInst>(LastInsertInst) ||
8096           isa<InsertValueInst>(LastInsertInst)) &&
8097          "Expected insertelement or insertvalue instruction!");
8098 
8099   assert((BuildVectorOpds.empty() && InsertElts.empty()) &&
8100          "Expected empty result vectors!");
8101 
8102   Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst);
8103   if (!AggregateSize)
8104     return false;
8105   BuildVectorOpds.resize(*AggregateSize);
8106   InsertElts.resize(*AggregateSize);
8107 
8108   if (findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts,
8109                              0)) {
8110     llvm::erase_value(BuildVectorOpds, nullptr);
8111     llvm::erase_value(InsertElts, nullptr);
8112     if (BuildVectorOpds.size() >= 2)
8113       return true;
8114   }
8115 
8116   return false;
8117 }
8118 
/// Try to get a reduction value from a phi node.
8120 ///
8121 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions
8122 /// if they come from either \p ParentBB or a containing loop latch.
8123 ///
8124 /// \returns A candidate reduction value if possible, or \code nullptr \endcode
8125 /// if not possible.
8126 static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
8127                                 BasicBlock *ParentBB, LoopInfo *LI) {
8128   // There are situations where the reduction value is not dominated by the
8129   // reduction phi. Vectorizing such cases has been reported to cause
8130   // miscompiles. See PR25787.
8131   auto DominatedReduxValue = [&](Value *R) {
8132     return isa<Instruction>(R) &&
8133            DT->dominates(P->getParent(), cast<Instruction>(R)->getParent());
8134   };
8135 
8136   Value *Rdx = nullptr;
8137 
8138   // Return the incoming value if it comes from the same BB as the phi node.
8139   if (P->getIncomingBlock(0) == ParentBB) {
8140     Rdx = P->getIncomingValue(0);
8141   } else if (P->getIncomingBlock(1) == ParentBB) {
8142     Rdx = P->getIncomingValue(1);
8143   }
8144 
8145   if (Rdx && DominatedReduxValue(Rdx))
8146     return Rdx;
8147 
8148   // Otherwise, check whether we have a loop latch to look at.
8149   Loop *BBL = LI->getLoopFor(ParentBB);
8150   if (!BBL)
8151     return nullptr;
8152   BasicBlock *BBLatch = BBL->getLoopLatch();
8153   if (!BBLatch)
8154     return nullptr;
8155 
  // There is a loop latch; return the incoming value if it comes from it.
  // This reduction pattern occasionally turns up.
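  // For example, with a rotated loop where the phi lives in the header and
  // the reduction update is in a separate latch block:
  //   header:
  //     %p = phi i32 [ 0, %entry ], [ %rdx, %latch ]
  //   ...
  //   latch:
  //     %rdx = add i32 %p, ...
  // here %rdx is the candidate reduction value.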
8158   if (P->getIncomingBlock(0) == BBLatch) {
8159     Rdx = P->getIncomingValue(0);
8160   } else if (P->getIncomingBlock(1) == BBLatch) {
8161     Rdx = P->getIncomingValue(1);
8162   }
8163 
8164   if (Rdx && DominatedReduxValue(Rdx))
8165     return Rdx;
8166 
8167   return nullptr;
8168 }
8169 
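/// Match \p I as a candidate two-operand reduction operation: either a binary
/// operator or one of the min/max intrinsics (maxnum, minnum, smax, smin,
/// umax, umin). On success, bind its operands to \p V0 and \p V1.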
8170 static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) {
8171   if (match(I, m_BinOp(m_Value(V0), m_Value(V1))))
8172     return true;
8173   if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1))))
8174     return true;
8175   if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1))))
8176     return true;
8177   if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1))))
8178     return true;
8179   if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1))))
8180     return true;
8181   if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1))))
8182     return true;
8183   if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1))))
8184     return true;
8185   return false;
8186 }
8187 
8188 /// Attempt to reduce a horizontal reduction.
8189 /// If it is legal to match a horizontal reduction feeding the phi node \a P
8190 /// with reduction operators \a Root (or one of its operands) in a basic block
8191 /// \a BB, then check if it can be done. If horizontal reduction is not found
8192 /// and root instruction is a binary operation, vectorization of the operands is
8193 /// attempted.
8194 /// \returns true if a horizontal reduction was matched and reduced or operands
8195 /// of one of the binary instruction were vectorized.
8196 /// \returns false if a horizontal reduction was not matched (or not possible)
8197 /// or no vectorization of any binary operation feeding \a Root instruction was
8198 /// performed.
8199 static bool tryToVectorizeHorReductionOrInstOperands(
8200     PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
8201     TargetTransformInfo *TTI,
8202     const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
8203   if (!ShouldVectorizeHor)
8204     return false;
8205 
8206   if (!Root)
8207     return false;
8208 
8209   if (Root->getParent() != BB || isa<PHINode>(Root))
8210     return false;
  // Start the analysis from the Root instruction. If a horizontal reduction is
  // found, try to vectorize it. If it is not a horizontal reduction, or
  // vectorization is not possible or not effective, and the currently analyzed
  // instruction is a binary operation, try to vectorize the operands, using
  // pre-order DFS traversal order. If the operands were not vectorized, repeat
  // the same procedure considering each operand as a possible root of the
  // horizontal reduction.
  // Interrupt the process if the Root instruction itself was vectorized or all
  // sub-trees not deeper than RecursionMaxDepth were analyzed/vectorized.
  // Skip the analysis of CmpInsts. The compiler performs a post-analysis of
  // CmpInsts, so we can skip these extra attempts here and save compile time.
8223   SmallVector<std::pair<Instruction *, unsigned>, 8> Stack(1, {Root, 0});
8224   SmallPtrSet<Value *, 8> VisitedInstrs;
8225   bool Res = false;
8226   while (!Stack.empty()) {
8227     Instruction *Inst;
8228     unsigned Level;
8229     std::tie(Inst, Level) = Stack.pop_back_val();
8230     // Do not try to analyze instruction that has already been vectorized.
8231     // This may happen when we vectorize instruction operands on a previous
8232     // iteration while stack was populated before that happened.
8233     if (R.isDeleted(Inst))
8234       continue;
8235     Value *B0, *B1;
8236     bool IsBinop = matchRdxBop(Inst, B0, B1);
8237     bool IsSelect = match(Inst, m_Select(m_Value(), m_Value(), m_Value()));
8238     if (IsBinop || IsSelect) {
8239       HorizontalReduction HorRdx;
8240       if (HorRdx.matchAssociativeReduction(P, Inst)) {
8241         if (HorRdx.tryToReduce(R, TTI)) {
8242           Res = true;
8243           // Set P to nullptr to avoid re-analysis of phi node in
8244           // matchAssociativeReduction function unless this is the root node.
8245           P = nullptr;
8246           continue;
8247         }
8248       }
8249       if (P && IsBinop) {
8250         Inst = dyn_cast<Instruction>(B0);
8251         if (Inst == P)
8252           Inst = dyn_cast<Instruction>(B1);
8253         if (!Inst) {
8254           // Set P to nullptr to avoid re-analysis of phi node in
8255           // matchAssociativeReduction function unless this is the root node.
8256           P = nullptr;
8257           continue;
8258         }
8259       }
8260     }
8261     // Set P to nullptr to avoid re-analysis of phi node in
8262     // matchAssociativeReduction function unless this is the root node.
8263     P = nullptr;
    // Do not try to vectorize CmpInst operands; this is done separately.
8265     if (!isa<CmpInst>(Inst) && Vectorize(Inst, R)) {
8266       Res = true;
8267       continue;
8268     }
8269 
8270     // Try to vectorize operands.
8271     // Continue analysis for the instruction from the same basic block only to
8272     // save compile time.
8273     if (++Level < RecursionMaxDepth)
8274       for (auto *Op : Inst->operand_values())
8275         if (VisitedInstrs.insert(Op).second)
8276           if (auto *I = dyn_cast<Instruction>(Op))
            // Do not try to vectorize CmpInst operands; this is done
            // separately.
8279             if (!isa<PHINode>(I) && !isa<CmpInst>(I) && !R.isDeleted(I) &&
8280                 I->getParent() == BB)
8281               Stack.emplace_back(I, Level);
8282   }
8283   return Res;
8284 }
8285 
8286 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
8287                                                  BasicBlock *BB, BoUpSLP &R,
8288                                                  TargetTransformInfo *TTI) {
8289   auto *I = dyn_cast_or_null<Instruction>(V);
8290   if (!I)
8291     return false;
8292 
8293   if (!isa<BinaryOperator>(I))
8294     P = nullptr;
8295   // Try to match and vectorize a horizontal reduction.
8296   auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool {
8297     return tryToVectorize(I, R);
8298   };
8299   return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI,
8300                                                   ExtraVectorization);
8301 }
8302 
8303 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
8304                                                  BasicBlock *BB, BoUpSLP &R) {
8305   const DataLayout &DL = BB->getModule()->getDataLayout();
8306   if (!R.canMapToVector(IVI->getType(), DL))
8307     return false;
8308 
8309   SmallVector<Value *, 16> BuildVectorOpds;
8310   SmallVector<Value *, 16> BuildVectorInsts;
8311   if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts))
8312     return false;
8313 
8314   LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
  // An aggregate value is unlikely to be processed in a vector register; its
  // scalar elements will be extracted into scalar registers as needed.
8317   return tryToVectorizeList(BuildVectorOpds, R, /*AllowReorder=*/false);
8318 }
8319 
8320 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
8321                                                    BasicBlock *BB, BoUpSLP &R) {
8322   SmallVector<Value *, 16> BuildVectorInsts;
8323   SmallVector<Value *, 16> BuildVectorOpds;
8324   SmallVector<int> Mask;
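  // Bail out if no build sequence is found, or if the built vector is merely
  // a shuffle of extractelement instructions; such a sequence can be lowered
  // to a vector shuffle directly and does not need SLP vectorization.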
8325   if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) ||
8326       (llvm::all_of(BuildVectorOpds,
8327                     [](Value *V) { return isa<ExtractElementInst>(V); }) &&
8328        isShuffle(BuildVectorOpds, Mask)))
8329     return false;
8330 
8331   LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
8332   return tryToVectorizeList(BuildVectorInsts, R, /*AllowReorder=*/true);
8333 }
8334 
8335 bool SLPVectorizerPass::vectorizeSimpleInstructions(
8336     SmallVectorImpl<Instruction *> &Instructions, BasicBlock *BB, BoUpSLP &R,
8337     bool AtTerminator) {
8338   bool OpsChanged = false;
8339   SmallVector<Instruction *, 4> PostponedCmps;
8340   for (auto *I : reverse(Instructions)) {
8341     if (R.isDeleted(I))
8342       continue;
8343     if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I))
8344       OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
8345     else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I))
8346       OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
8347     else if (isa<CmpInst>(I))
8348       PostponedCmps.push_back(I);
8349   }
8350   if (AtTerminator) {
8351     // Try to find reductions first.
8352     for (Instruction *I : PostponedCmps) {
8353       if (R.isDeleted(I))
8354         continue;
8355       for (Value *Op : I->operands())
8356         OpsChanged |= vectorizeRootInstruction(nullptr, Op, BB, R, TTI);
8357     }
8358     // Try to vectorize operands as vector bundles.
8359     for (Instruction *I : PostponedCmps) {
8360       if (R.isDeleted(I))
8361         continue;
8362       OpsChanged |= tryToVectorize(I, R);
8363     }
8364     Instructions.clear();
8365   } else {
8366     // Insert in reverse order since the PostponedCmps vector was filled in
8367     // reverse order.
8368     Instructions.assign(PostponedCmps.rbegin(), PostponedCmps.rend());
8369   }
8370   return OpsChanged;
8371 }
8372 
8373 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
8374   bool Changed = false;
8375   SmallVector<Value *, 4> Incoming;
8376   SmallPtrSet<Value *, 16> VisitedInstrs;
  // Maps phi nodes to the non-phi nodes found in the use tree for each phi
  // node. This helps to better identify the chains that can be vectorized.
8380   DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes;
8381 
8382   bool HaveVectorizedPhiNodes = true;
8383   while (HaveVectorizedPhiNodes) {
8384     HaveVectorizedPhiNodes = false;
8385 
8386     // Collect the incoming values from the PHIs.
8387     Incoming.clear();
8388     for (Instruction &I : *BB) {
8389       PHINode *P = dyn_cast<PHINode>(&I);
8390       if (!P)
8391         break;
8392 
8393       // No need to analyze deleted, vectorized and non-vectorizable
8394       // instructions.
8395       if (!VisitedInstrs.count(P) && !R.isDeleted(P) &&
8396           isValidElementType(P->getType()))
8397         Incoming.push_back(P);
8398     }
8399 
8400     // Find the corresponding non-phi nodes for better matching when trying to
8401     // build the tree.
8402     for (Value *V : Incoming) {
8403       SmallVectorImpl<Value *> &Opcodes =
8404           PHIToOpcodes.try_emplace(V).first->getSecond();
8405       if (!Opcodes.empty())
8406         continue;
8407       SmallVector<Value *, 4> Nodes(1, V);
8408       SmallPtrSet<Value *, 4> Visited;
8409       while (!Nodes.empty()) {
8410         auto *PHI = cast<PHINode>(Nodes.pop_back_val());
8411         if (!Visited.insert(PHI).second)
8412           continue;
8413         for (Value *V : PHI->incoming_values()) {
          if (auto *PHI1 = dyn_cast<PHINode>(V)) {
8415             Nodes.push_back(PHI1);
8416             continue;
8417           }
8418           Opcodes.emplace_back(V);
8419         }
8420       }
8421     }
8422 
8423     // Sort by type, parent, operands.
8424     stable_sort(Incoming, [this, &PHIToOpcodes](Value *V1, Value *V2) {
8425       assert(isValidElementType(V1->getType()) &&
8426              isValidElementType(V2->getType()) &&
8427              "Expected vectorizable types only.");
      // It is fine to compare type IDs here, since we expect only vectorizable
      // types, like ints, floats and pointers; we don't care about other types.
8430       if (V1->getType()->getTypeID() < V2->getType()->getTypeID())
8431         return true;
8432       if (V1->getType()->getTypeID() > V2->getType()->getTypeID())
8433         return false;
8434       ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
8435       ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
8436       if (Opcodes1.size() < Opcodes2.size())
8437         return true;
8438       if (Opcodes1.size() > Opcodes2.size())
8439         return false;
8440       for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
8441         // Undefs are compatible with any other value.
8442         if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I]))
8443           continue;
8444         if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
8445           if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
8446             DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent());
8447             DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent());
8448             if (!NodeI1)
8449               return NodeI2 != nullptr;
8450             if (!NodeI2)
8451               return false;
8452             assert((NodeI1 == NodeI2) ==
8453                        (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
8454                    "Different nodes should have different DFS numbers");
8455             if (NodeI1 != NodeI2)
8456               return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
8457             InstructionsState S = getSameOpcode({I1, I2});
8458             if (S.getOpcode())
8459               continue;
8460             return I1->getOpcode() < I2->getOpcode();
8461           }
8462         if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I]))
8463           continue;
8464         if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID())
8465           return true;
8466         if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID())
8467           return false;
8468       }
8469       return false;
8470     });
8471 
8472     auto &&AreCompatiblePHIs = [&PHIToOpcodes](Value *V1, Value *V2) {
8473       if (V1 == V2)
8474         return true;
8475       if (V1->getType() != V2->getType())
8476         return false;
8477       ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
8478       ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
8479       if (Opcodes1.size() != Opcodes2.size())
8480         return false;
8481       for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
8482         // Undefs are compatible with any other value.
8483         if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I]))
8484           continue;
8485         if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
8486           if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
8487             if (I1->getParent() != I2->getParent())
8488               return false;
8489             InstructionsState S = getSameOpcode({I1, I2});
8490             if (S.getOpcode())
8491               continue;
8492             return false;
8493           }
8494         if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I]))
8495           continue;
8496         if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID())
8497           return false;
8498       }
8499       return true;
8500     };
8501 
    // Try to vectorize elements based on their type.
8503     SmallVector<Value *, 4> Candidates;
8504     for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
8505                                            E = Incoming.end();
8506          IncIt != E;) {
8507 
8508       // Look for the next elements with the same type, parent and operand
8509       // kinds.
8510       SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
8511       while (SameTypeIt != E && AreCompatiblePHIs(*SameTypeIt, *IncIt)) {
8512         VisitedInstrs.insert(*SameTypeIt);
8513         ++SameTypeIt;
8514       }
8515 
8516       // Try to vectorize them.
8517       unsigned NumElts = (SameTypeIt - IncIt);
8518       LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs ("
8519                         << NumElts << ")\n");
      // The order in which the phi nodes appear in the program does not
      // matter, so allow tryToVectorizeList to reorder them if it is
      // beneficial.
8524       if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R,
8525                                             /*AllowReorder=*/true)) {
        // Success. Start over because instructions might have been changed.
8527         HaveVectorizedPhiNodes = true;
8528         Changed = true;
8529       } else if (NumElts < 4 &&
8530                  (Candidates.empty() ||
8531                   Candidates.front()->getType() == (*IncIt)->getType())) {
8532         Candidates.append(IncIt, std::next(IncIt, NumElts));
8533       }
8534       // Final attempt to vectorize phis with the same types.
8535       if (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType()) {
8536         if (Candidates.size() > 1 &&
8537             tryToVectorizeList(Candidates, R, /*AllowReorder=*/true)) {
          // Success. Start over because instructions might have been changed.
8539           HaveVectorizedPhiNodes = true;
8540           Changed = true;
8541         }
8542         Candidates.clear();
8543       }
8544 
8545       // Start over at the next instruction of a different type (or the end).
8546       IncIt = SameTypeIt;
8547     }
8548   }
8549 
8550   VisitedInstrs.clear();
8551 
8552   SmallVector<Instruction *, 8> PostProcessInstructions;
8553   SmallDenseSet<Instruction *, 4> KeyNodes;
8554   for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // Skip instructions with scalable type. The number of elements is unknown
    // at compile time for scalable types.
8557     if (isa<ScalableVectorType>(it->getType()))
8558       continue;
8559 
    // Skip instructions marked for deletion.
8561     if (R.isDeleted(&*it))
8562       continue;
    // We may go through BB multiple times, so skip the ones we have already
    // checked.
8564     if (!VisitedInstrs.insert(&*it).second) {
8565       if (it->use_empty() && KeyNodes.contains(&*it) &&
8566           vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
8567                                       it->isTerminator())) {
8568         // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
8570         Changed = true;
8571         it = BB->begin();
8572         e = BB->end();
8573       }
8574       continue;
8575     }
8576 
8577     if (isa<DbgInfoIntrinsic>(it))
8578       continue;
8579 
8580     // Try to vectorize reductions that use PHINodes.
8581     if (PHINode *P = dyn_cast<PHINode>(it)) {
8582       // Check that the PHI is a reduction PHI.
8583       if (P->getNumIncomingValues() == 2) {
8584         // Try to match and vectorize a horizontal reduction.
8585         if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
8586                                      TTI)) {
8587           Changed = true;
8588           it = BB->begin();
8589           e = BB->end();
8590           continue;
8591         }
8592       }
8593       // Try to vectorize the incoming values of the PHI, to catch reductions
8594       // that feed into PHIs.
8595       for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) {
8596         // Skip if the incoming block is the current BB for now. Also, bypass
8597         // unreachable IR for efficiency and to avoid crashing.
8598         // TODO: Collect the skipped incoming values and try to vectorize them
8599         // after processing BB.
8600         if (BB == P->getIncomingBlock(I) ||
8601             !DT->isReachableFromEntry(P->getIncomingBlock(I)))
8602           continue;
8603 
8604         Changed |= vectorizeRootInstruction(nullptr, P->getIncomingValue(I),
8605                                             P->getIncomingBlock(I), R, TTI);
8606       }
8607       continue;
8608     }
8609 
    // Ran into an instruction without users, such as a terminator, a store, or
    // a function call with an ignored return value. Handle unused instructions
    // based on their type: only void-typed instructions are considered, except
    // for CallInst and InvokeInst, which may produce an unused non-void value.
8613     if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
8614                             isa<InvokeInst>(it))) {
8615       KeyNodes.insert(&*it);
8616       bool OpsChanged = false;
8617       if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
8618         for (auto *V : it->operand_values()) {
8619           // Try to match and vectorize a horizontal reduction.
8620           OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
8621         }
8622       }
8623       // Start vectorization of post-process list of instructions from the
8624       // top-tree instructions to try to vectorize as many instructions as
8625       // possible.
8626       OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
8627                                                 it->isTerminator());
8628       if (OpsChanged) {
8629         // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
8631         Changed = true;
8632         it = BB->begin();
8633         e = BB->end();
8634         continue;
8635       }
8636     }
8637 
8638     if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
8639         isa<InsertValueInst>(it))
8640       PostProcessInstructions.push_back(&*it);
8641   }
8642 
8643   return Changed;
8644 }
8645 
8646 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
8647   auto Changed = false;
8648   for (auto &Entry : GEPs) {
8649     // If the getelementptr list has fewer than two elements, there's nothing
8650     // to do.
8651     if (Entry.second.size() < 2)
8652       continue;
8653 
8654     LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
8655                       << Entry.second.size() << ".\n");
8656 
8657     // Process the GEP list in chunks suitable for the target's supported
8658     // vector size. If a vector register can't hold 1 element, we are done. We
8659     // are trying to vectorize the index computations, so the maximum number of
8660     // elements is based on the size of the index expression, rather than the
8661     // size of the GEP itself (the target's pointer size).
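    // For example, with a 256-bit vector register and i64 index expressions,
    // MaxElts is 256 / 64 == 4, so the list is processed in chunks of at most
    // four getelementptrs.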
8662     unsigned MaxVecRegSize = R.getMaxVecRegSize();
8663     unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin());
8664     if (MaxVecRegSize < EltSize)
8665       continue;
8666 
8667     unsigned MaxElts = MaxVecRegSize / EltSize;
8668     for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
8669       auto Len = std::min<unsigned>(BE - BI, MaxElts);
8670       ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);
8671 
      // Initialize a set of candidate getelementptrs. Note that we use a
8673       // SetVector here to preserve program order. If the index computations
8674       // are vectorizable and begin with loads, we want to minimize the chance
8675       // of having to reorder them later.
8676       SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());
8677 
8678       // Some of the candidates may have already been vectorized after we
8679       // initially collected them. If so, they are marked as deleted, so remove
8680       // them from the set of candidates.
8681       Candidates.remove_if(
8682           [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); });
8683 
8684       // Remove from the set of candidates all pairs of getelementptrs with
8685       // constant differences. Such getelementptrs are likely not good
8686       // candidates for vectorization in a bottom-up phase since one can be
8687       // computed from the other. We also ensure all candidate getelementptr
8688       // indices are unique.
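      // For example (schematically), '%g0 = gep %p, %i' and
      // '%g1 = gep %p, %i + 1' differ by a constant, so one can be computed
      // from the other and the pair is removed from the candidate set.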
8689       for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
8690         auto *GEPI = GEPList[I];
8691         if (!Candidates.count(GEPI))
8692           continue;
8693         auto *SCEVI = SE->getSCEV(GEPList[I]);
8694         for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
8695           auto *GEPJ = GEPList[J];
8696           auto *SCEVJ = SE->getSCEV(GEPList[J]);
8697           if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
8698             Candidates.remove(GEPI);
8699             Candidates.remove(GEPJ);
8700           } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
8701             Candidates.remove(GEPJ);
8702           }
8703         }
8704       }
8705 
8706       // We break out of the above computation as soon as we know there are
8707       // fewer than two candidates remaining.
8708       if (Candidates.size() < 2)
8709         continue;
8710 
8711       // Add the single, non-constant index of each candidate to the bundle. We
8712       // ensured the indices met these constraints when we originally collected
8713       // the getelementptrs.
8714       SmallVector<Value *, 16> Bundle(Candidates.size());
8715       auto BundleIndex = 0u;
8716       for (auto *V : Candidates) {
8717         auto *GEP = cast<GetElementPtrInst>(V);
8718         auto *GEPIdx = GEP->idx_begin()->get();
8719         assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
8720         Bundle[BundleIndex++] = GEPIdx;
8721       }
8722 
8723       // Try and vectorize the indices. We are currently only interested in
8724       // gather-like cases of the form:
8725       //
8726       // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
8727       //
8728       // where the loads of "a", the loads of "b", and the subtractions can be
8729       // performed in parallel. It's likely that detecting this pattern in a
8730       // bottom-up phase will be simpler and less costly than building a
8731       // full-blown top-down phase beginning at the consecutive loads.
8732       Changed |= tryToVectorizeList(Bundle, R);
8733     }
8734   }
8735   return Changed;
8736 }
8737 
8738 bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
8739   bool Changed = false;
  // Sort by type, base pointer and value operand. Value operands must be
  // compatible (have the same opcode and the same parent), otherwise it is
  // definitely not profitable to try to vectorize them.
8743   auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) {
8744     if (V->getPointerOperandType()->getTypeID() <
8745         V2->getPointerOperandType()->getTypeID())
8746       return true;
8747     if (V->getPointerOperandType()->getTypeID() >
8748         V2->getPointerOperandType()->getTypeID())
8749       return false;
8750     // UndefValues are compatible with all other values.
8751     if (isa<UndefValue>(V->getValueOperand()) ||
8752         isa<UndefValue>(V2->getValueOperand()))
8753       return false;
8754     if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand()))
8755       if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent());
        DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent());
        assert(NodeI1 && "Should only process reachable instructions");
        assert(NodeI2 && "Should only process reachable instructions");
8762         assert((NodeI1 == NodeI2) ==
8763                    (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
8764                "Different nodes should have different DFS numbers");
8765         if (NodeI1 != NodeI2)
8766           return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
8767         InstructionsState S = getSameOpcode({I1, I2});
8768         if (S.getOpcode())
8769           return false;
8770         return I1->getOpcode() < I2->getOpcode();
8771       }
8772     if (isa<Constant>(V->getValueOperand()) &&
8773         isa<Constant>(V2->getValueOperand()))
8774       return false;
8775     return V->getValueOperand()->getValueID() <
8776            V2->getValueOperand()->getValueID();
8777   };
8778 
8779   auto &&AreCompatibleStores = [](StoreInst *V1, StoreInst *V2) {
8780     if (V1 == V2)
8781       return true;
8782     if (V1->getPointerOperandType() != V2->getPointerOperandType())
8783       return false;
8784     // Undefs are compatible with any other value.
8785     if (isa<UndefValue>(V1->getValueOperand()) ||
8786         isa<UndefValue>(V2->getValueOperand()))
8787       return true;
8788     if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
8789       if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
8790         if (I1->getParent() != I2->getParent())
8791           return false;
8792         InstructionsState S = getSameOpcode({I1, I2});
8793         return S.getOpcode() > 0;
8794       }
8795     if (isa<Constant>(V1->getValueOperand()) &&
8796         isa<Constant>(V2->getValueOperand()))
8797       return true;
8798     return V1->getValueOperand()->getValueID() ==
8799            V2->getValueOperand()->getValueID();
8800   };
8801 
8802   // Attempt to sort and vectorize each of the store-groups.
8803   for (auto &Pair : Stores) {
8804     if (Pair.second.size() < 2)
8805       continue;
8806 
8807     LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
8808                       << Pair.second.size() << ".\n");
8809 
8810     stable_sort(Pair.second, StoreSorter);
8811 
8812     // Try to vectorize elements based on their compatibility.
8813     for (ArrayRef<StoreInst *>::iterator IncIt = Pair.second.begin(),
8814                                          E = Pair.second.end();
8815          IncIt != E;) {
8816 
8817       // Look for the next elements with the same type.
8818       ArrayRef<StoreInst *>::iterator SameTypeIt = IncIt;
8819       Type *EltTy = (*IncIt)->getPointerOperand()->getType();
8820 
8821       while (SameTypeIt != E && AreCompatibleStores(*SameTypeIt, *IncIt))
8822         ++SameTypeIt;
8823 
8824       // Try to vectorize them.
8825       unsigned NumElts = (SameTypeIt - IncIt);
8826       LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at stores ("
8827                         << NumElts << ")\n");
8828       if (NumElts > 1 && !EltTy->getPointerElementType()->isVectorTy() &&
8829           vectorizeStores(makeArrayRef(IncIt, NumElts), R)) {
        // Success. Start over because instructions might have been changed.
8831         Changed = true;
8832       }
8833 
8834       // Start over at the next instruction of a different type (or the end).
8835       IncIt = SameTypeIt;
8836     }
8837   }
8838   return Changed;
8839 }
8840 
8841 char SLPVectorizer::ID = 0;
8842 
8843 static const char lv_name[] = "SLP Vectorizer";
8844 
8845 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
8846 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
8847 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
8848 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
8849 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
8850 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
8851 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
8852 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
8853 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
8854 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)
8855 
8856 Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); }
8857